blob: 5582799ff68ca9ba204327169ca404370287d44a [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5
telsoa014fcda012018-03-09 14:13:49 +00006#include "ClLayerSupport.hpp"
David Beck3e9e1152018-10-17 14:17:50 +01007#include "ClBackendId.hpp"
arovir017c22c702018-10-09 11:16:46 +01008
David Beck3cc9a622018-10-12 10:38:31 +01009#include <armnn/Descriptors.hpp>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000010#include <InternalTypes.hpp>
11#include <LayerSupportCommon.hpp>
telsoa014fcda012018-03-09 14:13:49 +000012
David Beck111b5d92018-11-12 14:59:37 +000013#include <backendsCommon/BackendRegistry.hpp>
David Beck3e9e1152018-10-17 14:17:50 +010014
telsoa014fcda012018-03-09 14:13:49 +000015#include <boost/core/ignore_unused.hpp>
16
Matteo Martincighd95e9062019-01-31 15:35:59 +000017#if defined(ARMCOMPUTECL_ENABLED)
Narumol Prangnawarat74135832019-05-23 15:07:33 +010018#include <aclCommon/ArmComputeUtils.hpp>
David Beckac42efd2018-09-26 17:41:13 +010019#include "workloads/ClAdditionWorkload.hpp"
Nattapat Chaimanowonge06757e2018-10-11 15:39:18 +010020#include "workloads/ClActivationWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010021#include "workloads/ClBatchNormalizationFloatWorkload.hpp"
Mike Kelly831faed2018-11-28 11:52:08 +000022#include "workloads/ClBatchToSpaceNdWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010023#include "workloads/ClConvertFp16ToFp32Workload.hpp"
24#include "workloads/ClConvertFp32ToFp16Workload.hpp"
Matthew Benthamd8067922018-10-03 17:18:04 +010025#include "workloads/ClConvolution2dWorkload.hpp"
Jim Flynn983daec2019-05-29 16:20:16 +010026#include "workloads/ClDequantizeWorkload.hpp"
Matthew Benthamd8777392018-10-08 09:38:55 +010027#include "workloads/ClDepthwiseConvolutionWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010028#include "workloads/ClDivisionFloatWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010029#include "workloads/ClFullyConnectedWorkload.hpp"
Nattapat Chaimanowongc6a41ff2019-01-29 09:56:02 +000030#include "workloads/ClGreaterWorkload.hpp"
arovir01085f0a42018-10-08 14:48:19 +010031#include "workloads/ClL2NormalizationFloatWorkload.hpp"
32#include "workloads/ClLstmFloatWorkload.hpp"
keidav01a959ee52018-12-19 10:04:58 +000033#include "workloads/ClMaximumWorkload.hpp"
Matteo Martincigh28dcab62018-10-19 16:40:03 +010034#include "workloads/ClMeanWorkload.hpp"
Jim Flynn69059412019-05-17 13:03:57 +010035#include "workloads/ClConcatWorkload.hpp"
saoste019292aa32019-01-08 13:55:59 +000036#include "workloads/ClMinimumWorkload.hpp"
arovir01085f0a42018-10-08 14:48:19 +010037#include "workloads/ClMultiplicationWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010038#include "workloads/ClNormalizationFloatWorkload.hpp"
arovir01085f0a42018-10-08 14:48:19 +010039#include "workloads/ClPadWorkload.hpp"
40#include "workloads/ClPermuteWorkload.hpp"
Nattapat Chaimanowongac9e0962018-10-10 17:18:35 +010041#include "workloads/ClPooling2dWorkload.hpp"
Sadik Armagan20ec2492019-05-31 09:09:44 +010042#include "workloads/ClQuantizeWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010043#include "workloads/ClSoftmaxBaseWorkload.hpp"
Sadik Armaganf4464322018-12-20 16:19:12 +000044#include "workloads/ClSpaceToBatchNdWorkload.hpp"
Narumol Prangnawarat74135832019-05-23 15:07:33 +010045#include "workloads/ClSplitterWorkload.hpp"
keidav01d74dc912018-12-10 18:16:07 +000046#include "workloads/ClStridedSliceWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010047#include "workloads/ClSubtractionWorkload.hpp"
telsoa014fcda012018-03-09 14:13:49 +000048#endif
49
50using namespace boost;
51
52namespace armnn
53{
arovir017c22c702018-10-09 11:16:46 +010054
telsoa014fcda012018-03-09 14:13:49 +000055namespace
56{
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +010057
telsoa014fcda012018-03-09 14:13:49 +000058template<unsigned int FilterSize>
59bool IsMatchingSize2d(const TensorInfo& weightInfo)
60{
telsoa01c577f2c2018-08-31 09:22:23 +010061 // Width & Height must match.
telsoa014fcda012018-03-09 14:13:49 +000062 return (weightInfo.GetShape()[3] == FilterSize) && (weightInfo.GetShape()[2] == FilterSize);
63}
64
// Base case: checks the runtime stride against a single compile-time candidate.
template<uint32_t ValidStride>
bool IsMatchingStride(uint32_t actualStride)
{
    return actualStride == ValidStride;
}
70
// Variadic case: the runtime stride is accepted if it matches any of the
// compile-time candidates; recurses pairwise down to the single-value base.
template<uint32_t FirstStride, uint32_t SecondStride, uint32_t... ValidStrides>
bool IsMatchingStride(uint32_t actualStride)
{
    if (IsMatchingStride<FirstStride>(actualStride))
    {
        return true;
    }
    return IsMatchingStride<SecondStride, ValidStrides...>(actualStride);
}
telsoa014fcda012018-03-09 14:13:49 +000076
// Reports whether the CL backend is available in this build.
// When ARMCOMPUTECL_ENABLED is not defined, writes an explanatory message
// into reasonIfUnsupported (if one was requested) and returns false.
bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported)
{
#if defined(ARMCOMPUTECL_ENABLED)
    return true;
#else
    if (reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = "The armnn library has been built without CL support";
    }
    return false;
#endif
}
89
#if defined(ARMCOMPUTECL_ENABLED)
// With CL compiled in, forward the support expression unchanged.
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) (expr)
#else
// Without CL, every query collapses to the "backend not built" answer.
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) IsClBackendSupported(reasonIfUnsupported)
#endif

#if defined(ARMCOMPUTECL_ENABLED)
// Runs an Arm Compute Library validate function and converts its Status into
// a bool; on failure the ACL error description is copied into
// reasonIfUnsupported (if one was requested).
template<class FuncType, class... Args>
inline bool IsWorkloadSupported(FuncType&& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
    arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
    if (!supported && reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = aclStatus.error_description();
    }
    return supported;
}

// NOTE: this macro expands to a 'return' statement in the calling function.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsClBackendSupported(reasonIfUnsupported);
#endif
115
telsoa01c577f2c2018-08-31 09:22:23 +0100116template<typename FloatFunc, typename Uint8Func, typename ... Params>
arovir01085f0a42018-10-08 14:48:19 +0100117bool IsSupportedForDataTypeCl(Optional<std::string&> reasonIfUnsupported,
telsoa014fcda012018-03-09 14:13:49 +0000118 DataType dataType,
telsoa01c577f2c2018-08-31 09:22:23 +0100119 FloatFunc floatFuncPtr,
telsoa014fcda012018-03-09 14:13:49 +0000120 Uint8Func uint8FuncPtr,
121 Params&&... params)
122{
123 return IsClBackendSupported(reasonIfUnsupported) &&
124 IsSupportedForDataTypeGeneric(reasonIfUnsupported,
125 dataType,
126 floatFuncPtr,
telsoa01c577f2c2018-08-31 09:22:23 +0100127 floatFuncPtr,
telsoa014fcda012018-03-09 14:13:49 +0000128 uint8FuncPtr,
narpra01db2b1602019-01-23 15:23:11 +0000129 &FalseFunc<>,
kevmay012b4d88e2019-01-24 14:05:09 +0000130 &FalseFunc<>,
telsoa014fcda012018-03-09 14:13:49 +0000131 std::forward<Params>(params)...);
132}
133
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100134} // anonymous namespace
135
136bool ClLayerSupport::IsActivationSupported(const TensorInfo& input,
137 const TensorInfo& output,
138 const ActivationDescriptor& descriptor,
139 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +0000140{
telsoa01c577f2c2018-08-31 09:22:23 +0100141 FORWARD_WORKLOAD_VALIDATE_FUNC(ClActivationWorkloadValidate,
142 reasonIfUnsupported,
143 input,
144 output,
145 descriptor);
telsoa014fcda012018-03-09 14:13:49 +0000146}
147
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100148bool ClLayerSupport::IsAdditionSupported(const TensorInfo& input0,
149 const TensorInfo& input1,
150 const TensorInfo& output,
151 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +0000152{
arovir01085f0a42018-10-08 14:48:19 +0100153 FORWARD_WORKLOAD_VALIDATE_FUNC(ClAdditionValidate,
154 reasonIfUnsupported,
155 input0,
156 input1,
157 output);
telsoa014fcda012018-03-09 14:13:49 +0000158}
159
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100160bool ClLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
161 const TensorInfo& output,
162 const TensorInfo& mean,
163 const TensorInfo& var,
164 const TensorInfo& beta,
165 const TensorInfo& gamma,
166 const BatchNormalizationDescriptor& descriptor,
167 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +0000168{
telsoa01c577f2c2018-08-31 09:22:23 +0100169 FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchNormalizationValidate,
170 reasonIfUnsupported,
171 input,
172 output,
173 mean,
174 var,
175 beta,
176 gamma,
177 descriptor);
telsoa014fcda012018-03-09 14:13:49 +0000178}
179
Mike Kelly831faed2018-11-28 11:52:08 +0000180bool ClLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
181 const TensorInfo& output,
182 const BatchToSpaceNdDescriptor& descriptor,
183 Optional<std::string&> reasonIfUnsupported) const
184{
185 FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchToSpaceNdWorkloadValidate,
186 reasonIfUnsupported,
187 input,
188 output,
189 descriptor);
190}
191
Jim Flynn906f9462019-05-10 13:55:21 +0100192bool ClLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
193 const TensorInfo& output,
Jim Flynne242f2d2019-05-22 14:24:13 +0100194 const ConcatDescriptor& descriptor,
Jim Flynn906f9462019-05-10 13:55:21 +0100195 Optional<std::string&> reasonIfUnsupported) const
196{
Jim Flynne242f2d2019-05-22 14:24:13 +0100197 if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
198 {
199 SetValueChecked(reasonIfUnsupported, "Cl Concat: Concat axis > Number of dimensions.");
200 return false;
201 }
202
203 unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
204 if(concatInnerAxis < 3) // Width, height, or channels
205 {
206 FORWARD_WORKLOAD_VALIDATE_FUNC(ClConcatWorkloadValidate,
207 reasonIfUnsupported,
208 inputs,
209 output,
210 descriptor);
211 }
212 else if (concatInnerAxis == 3)
213 {
214 // We rely on the sub-tensor optimization to handle the batch dimension for 4D tensors. If we can't use
215 // sub-tensors for this then we can't support it. Here is where we check that the sub-tensors will work.
216 for (auto& input : inputs)
217 {
218 if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
219 {
220 SetValueChecked(reasonIfUnsupported, "Cl Concat: Types and quantization parameters must match.");
221 return false;
222 }
223 }
224 return true; // Sub-tensors support concat along batch
225 }
226 else // > 4 dimensions not supported.
227 {
228 SetValueChecked(reasonIfUnsupported, "Cl Concat: Maximum of 4 dimensions supported.");
229 return false;
230 }
Jim Flynn906f9462019-05-10 13:55:21 +0100231}
232
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100233bool ClLayerSupport::IsConstantSupported(const TensorInfo& output,
234 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +0000235{
236 return IsSupportedForDataTypeCl(reasonIfUnsupported,
237 output.GetDataType(),
238 &TrueFunc<>,
239 &FalseFuncU8<>);
240}
241
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100242bool ClLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
243 const TensorInfo& output,
244 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +0000245{
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100246 FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp16ToFp32WorkloadValidate,
247 reasonIfUnsupported,
248 input,
249 output);
telsoa014fcda012018-03-09 14:13:49 +0000250}
251
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100252bool ClLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
253 const TensorInfo& output,
254 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +0000255{
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100256 FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp32ToFp16WorkloadValidate,
257 reasonIfUnsupported,
258 input,
259 output);
telsoa014fcda012018-03-09 14:13:49 +0000260}
261
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100262bool ClLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
263 const TensorInfo& output,
264 const Convolution2dDescriptor& descriptor,
265 const TensorInfo& weights,
266 const Optional<TensorInfo>& biases,
267 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +0000268{
surmeh013537c2c2018-05-18 16:31:43 +0100269 FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution2dWorkloadValidate,
270 reasonIfUnsupported,
271 input,
272 output,
273 descriptor,
274 weights,
275 biases);
telsoa014fcda012018-03-09 14:13:49 +0000276}
277
Jim Flynn983daec2019-05-29 16:20:16 +0100278bool ClLayerSupport::IsDequantizeSupported(const TensorInfo& input,
279 const TensorInfo& output,
280 Optional<std::string&> reasonIfUnsupported) const
281{
282 FORWARD_WORKLOAD_VALIDATE_FUNC(ClDequantizeWorkloadValidate,
283 reasonIfUnsupported,
284 input,
285 output);
286}
287
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100288bool ClLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
289 const TensorInfo& output,
290 const DepthwiseConvolution2dDescriptor& descriptor,
291 const TensorInfo& weights,
292 const Optional<TensorInfo>& biases,
293 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +0000294{
telsoa01c577f2c2018-08-31 09:22:23 +0100295 FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
296 reasonIfUnsupported,
297 input,
298 output,
299 descriptor,
300 weights,
301 biases);
telsoa014fcda012018-03-09 14:13:49 +0000302}
303
Pablo Tellof0bd6832019-04-26 17:58:13 +0100304bool ClLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
305 const TensorInfo& output,
306 const DepthwiseConvolution2dDescriptor& descriptor,
307 const TensorInfo& weights,
308 const Optional<TensorInfo>& biases,
309 Optional<std::string&> reasonIfUnsupported) const
310{
311 FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
312 reasonIfUnsupported,
313 input,
314 output,
315 descriptor,
316 weights,
317 biases);
318}
319
320
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100321bool ClLayerSupport::IsDivisionSupported(const TensorInfo& input0,
322 const TensorInfo& input1,
323 const TensorInfo& output,
324 Optional<std::string&> reasonIfUnsupported) const
Francis Murtaghe7a86a42018-08-29 12:42:10 +0100325{
326 FORWARD_WORKLOAD_VALIDATE_FUNC(ClDivisionWorkloadValidate,
327 reasonIfUnsupported,
328 input0,
329 input1,
330 output);
331}
332
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100333bool ClLayerSupport::IsFloorSupported(const TensorInfo& input,
334 const TensorInfo& output,
335 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +0000336{
337 ignore_unused(output);
telsoa01c577f2c2018-08-31 09:22:23 +0100338 return IsClBackendSupported(reasonIfUnsupported) &&
339 IsSupportedForDataTypeGeneric(reasonIfUnsupported,
340 input.GetDataType(),
341 &FalseFuncF16<>,
342 &TrueFunc<>,
narpra01db2b1602019-01-23 15:23:11 +0000343 &FalseFuncU8<>,
kevmay012b4d88e2019-01-24 14:05:09 +0000344 &FalseFuncI32<>,
345 &FalseFuncU8<>);
telsoa01c577f2c2018-08-31 09:22:23 +0100346}
347
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100348bool ClLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
349 const TensorInfo& output,
350 const TensorInfo& weights,
351 const TensorInfo& biases,
352 const FullyConnectedDescriptor& descriptor,
353 Optional<std::string&> reasonIfUnsupported) const
354{
355 FORWARD_WORKLOAD_VALIDATE_FUNC(ClFullyConnectedWorkloadValidate,
356 reasonIfUnsupported,
357 input,
358 output,
359 weights,
360 biases,
361 descriptor);
362}
363
Nattapat Chaimanowongc6a41ff2019-01-29 09:56:02 +0000364bool ClLayerSupport::IsGreaterSupported(const TensorInfo& input0,
365 const TensorInfo& input1,
366 const TensorInfo& output,
367 Optional<std::string&> reasonIfUnsupported) const
368{
369 FORWARD_WORKLOAD_VALIDATE_FUNC(ClGreaterWorkloadValidate,
370 reasonIfUnsupported,
371 input0,
372 input1,
373 output);
374}
375
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100376bool ClLayerSupport::IsInputSupported(const TensorInfo& input,
377 Optional<std::string&> reasonIfUnsupported) const
378{
379 return IsSupportedForDataTypeCl(reasonIfUnsupported,
380 input.GetDataType(),
381 &TrueFunc<>,
382 &TrueFunc<>);
383}
384
385bool ClLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
386 const TensorInfo& output,
387 const L2NormalizationDescriptor& descriptor,
388 Optional<std::string&> reasonIfUnsupported) const
389{
390 FORWARD_WORKLOAD_VALIDATE_FUNC(ClL2NormalizationWorkloadValidate,
391 reasonIfUnsupported,
392 input,
393 output,
394 descriptor);
395}
396
397bool ClLayerSupport::IsLstmSupported(const TensorInfo& input,
398 const TensorInfo& outputStateIn,
399 const TensorInfo& cellStateIn,
400 const TensorInfo& scratchBuffer,
401 const TensorInfo& outputStateOut,
402 const TensorInfo& cellStateOut,
403 const TensorInfo& output,
404 const LstmDescriptor& descriptor,
405 const TensorInfo& inputToForgetWeights,
406 const TensorInfo& inputToCellWeights,
407 const TensorInfo& inputToOutputWeights,
408 const TensorInfo& recurrentToForgetWeights,
409 const TensorInfo& recurrentToCellWeights,
410 const TensorInfo& recurrentToOutputWeights,
411 const TensorInfo& forgetGateBias,
412 const TensorInfo& cellBias,
413 const TensorInfo& outputGateBias,
414 const TensorInfo* inputToInputWeights,
415 const TensorInfo* recurrentToInputWeights,
416 const TensorInfo* cellToInputWeights,
417 const TensorInfo* inputGateBias,
418 const TensorInfo* projectionWeights,
419 const TensorInfo* projectionBias,
420 const TensorInfo* cellToForgetWeights,
421 const TensorInfo* cellToOutputWeights,
422 Optional<std::string&> reasonIfUnsupported) const
telsoa01c577f2c2018-08-31 09:22:23 +0100423{
arovir01085f0a42018-10-08 14:48:19 +0100424 FORWARD_WORKLOAD_VALIDATE_FUNC(ClLstmFloatWorkloadValidate,
425 reasonIfUnsupported,
426 input,
427 outputStateIn,
428 cellStateIn,
429 scratchBuffer,
430 outputStateOut,
431 cellStateOut,
432 output,
433 descriptor,
434 inputToForgetWeights,
435 inputToCellWeights,
436 inputToOutputWeights,
437 recurrentToForgetWeights,
438 recurrentToCellWeights,
439 recurrentToOutputWeights,
440 forgetGateBias,
441 cellBias,
442 outputGateBias,
443 inputToInputWeights,
444 recurrentToInputWeights,
445 cellToInputWeights,
446 inputGateBias,
447 projectionWeights,
448 projectionBias,
449 cellToForgetWeights,
450 cellToOutputWeights);
telsoa01c577f2c2018-08-31 09:22:23 +0100451}
452
keidav01a959ee52018-12-19 10:04:58 +0000453bool ClLayerSupport::IsMaximumSupported(const TensorInfo& input0,
454 const TensorInfo& input1,
455 const TensorInfo& output,
456 Optional<std::string&> reasonIfUnsupported) const
457{
458 FORWARD_WORKLOAD_VALIDATE_FUNC(ClMaximumWorkloadValidate,
459 reasonIfUnsupported,
460 input0,
461 input1,
462 output);
463}
464
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100465bool ClLayerSupport::IsMeanSupported(const TensorInfo& input,
466 const TensorInfo& output,
467 const MeanDescriptor& descriptor,
468 Optional<std::string&> reasonIfUnsupported) const
narpra0132b90462018-09-13 11:07:48 +0100469{
Matteo Martincigh28dcab62018-10-19 16:40:03 +0100470 FORWARD_WORKLOAD_VALIDATE_FUNC(ClMeanValidate,
471 reasonIfUnsupported,
472 input,
473 output,
474 descriptor);
narpra0132b90462018-09-13 11:07:48 +0100475}
476
Matteo Martincigh992d6dc2019-01-10 17:34:20 +0000477bool ClLayerSupport::IsMemCopySupported(const TensorInfo &input,
478 const TensorInfo &output,
479 Optional<std::string &> reasonIfUnsupported) const
480{
481 ignore_unused(input);
482 ignore_unused(output);
483 return true;
484}
485
// Merger appears to be the legacy name for Concat; this entry point simply
// forwards the query, unchanged, to IsConcatSupported.
bool ClLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                                       const TensorInfo& output,
                                       const MergerDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
}
493
saoste019292aa32019-01-08 13:55:59 +0000494bool ClLayerSupport::IsMinimumSupported(const TensorInfo& input0,
495 const TensorInfo& input1,
496 const TensorInfo& output,
497 Optional<std::string&> reasonIfUnsupported) const
498{
499 FORWARD_WORKLOAD_VALIDATE_FUNC(ClMinimumWorkloadValidate,
500 reasonIfUnsupported,
501 input0,
502 input1,
503 output);
504}
505
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100506bool ClLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
507 const TensorInfo& input1,
508 const TensorInfo& output,
509 Optional<std::string&> reasonIfUnsupported) const
510{
511 FORWARD_WORKLOAD_VALIDATE_FUNC(ClMultiplicationWorkloadValidate,
512 reasonIfUnsupported,
513 input0,
514 input1,
515 output);
516}
517
518bool ClLayerSupport::IsNormalizationSupported(const TensorInfo& input,
519 const TensorInfo& output,
520 const NormalizationDescriptor& descriptor,
521 Optional<std::string&> reasonIfUnsupported) const
522{
523 FORWARD_WORKLOAD_VALIDATE_FUNC(ClNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
524}
525
526bool ClLayerSupport::IsOutputSupported(const TensorInfo& output,
527 Optional<std::string&> reasonIfUnsupported) const
528{
kevmay012b4d88e2019-01-24 14:05:09 +0000529 return IsClBackendSupported(reasonIfUnsupported) &&
530 IsSupportedForDataTypeGeneric(reasonIfUnsupported,
531 output.GetDataType(),
532 &TrueFunc<>,
533 &TrueFunc<>,
534 &TrueFunc<>,
535 &FalseFuncI32<>,
536 &TrueFunc<>);
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100537}
538
539bool ClLayerSupport::IsPadSupported(const TensorInfo& input,
540 const TensorInfo& output,
541 const PadDescriptor& descriptor,
542 Optional<std::string&> reasonIfUnsupported) const
arovir01085f0a42018-10-08 14:48:19 +0100543{
544 FORWARD_WORKLOAD_VALIDATE_FUNC(ClPadValidate,
545 reasonIfUnsupported,
546 input,
547 output,
548 descriptor);
549}
550
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100551bool ClLayerSupport::IsPermuteSupported(const TensorInfo& input,
552 const TensorInfo& output,
553 const PermuteDescriptor& descriptor,
554 Optional<std::string&> reasonIfUnsupported) const
555{
556 ignore_unused(input);
557 ignore_unused(output);
558 FORWARD_WORKLOAD_VALIDATE_FUNC(ClPermuteWorkloadValidate, reasonIfUnsupported, descriptor);
telsoa014fcda012018-03-09 14:13:49 +0000559}
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100560
561bool ClLayerSupport::IsPooling2dSupported(const TensorInfo& input,
562 const TensorInfo& output,
563 const Pooling2dDescriptor& descriptor,
564 Optional<std::string&> reasonIfUnsupported) const
565{
566 FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
567}
568
Sadik Armagan20ec2492019-05-31 09:09:44 +0100569bool ClLayerSupport::IsQuantizeSupported(const TensorInfo& input,
570 const TensorInfo& output,
571 Optional<std::string&> reasonIfUnsupported) const
572{
573 FORWARD_WORKLOAD_VALIDATE_FUNC(ClQuantizeWorkloadValidate,
574 reasonIfUnsupported,
575 input,
576 output);
577}
578
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100579bool ClLayerSupport::IsReshapeSupported(const TensorInfo& input,
Matteo Martincigh992d6dc2019-01-10 17:34:20 +0000580 const ReshapeDescriptor& descriptor,
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100581 Optional<std::string&> reasonIfUnsupported) const
582{
583 ignore_unused(input);
Matteo Martincigh992d6dc2019-01-10 17:34:20 +0000584 ignore_unused(descriptor);
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100585 ignore_unused(reasonIfUnsupported);
586 return true;
587}
588
589bool ClLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
Sadik Armaganc625f002018-12-17 11:32:16 +0000590 const TensorInfo& output,
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100591 Optional<std::string&> reasonIfUnsupported) const
592{
Sadik Armaganc625f002018-12-17 11:32:16 +0000593 ignore_unused(output);
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100594 return IsSupportedForDataTypeCl(reasonIfUnsupported,
595 input.GetDataType(),
596 &TrueFunc<>,
597 &FalseFuncU8<>);
598}
599
600bool ClLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
601 const TensorInfo& output,
602 const SoftmaxDescriptor& descriptor,
603 Optional<std::string&> reasonIfUnsupported) const
604{
605 ignore_unused(descriptor);
606 FORWARD_WORKLOAD_VALIDATE_FUNC(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output);
607}
608
Sadik Armaganf4464322018-12-20 16:19:12 +0000609bool ClLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input,
610 const TensorInfo& output,
611 const SpaceToBatchNdDescriptor& descriptor,
612 Optional<std::string&> reasonIfUnsupported) const
613{
614 FORWARD_WORKLOAD_VALIDATE_FUNC(ClSpaceToBatchNdWorkloadValidate,
615 reasonIfUnsupported,
616 input,
617 output,
618 descriptor);
619}
620
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100621bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
622 const ViewsDescriptor& descriptor,
623 Optional<std::string&> reasonIfUnsupported) const
624{
625 ignore_unused(descriptor);
626 return IsSupportedForDataTypeCl(reasonIfUnsupported,
627 input.GetDataType(),
628 &TrueFunc<>,
629 &TrueFunc<>);
630}
631
// Queries CL support for splitting 'input' into 'outputs' as described by the
// views descriptor. When CL is compiled in and the split is along the last
// dimension, the ACL validate function answers directly (the macro below
// expands to a 'return'); otherwise the split falls back to sub-tensors,
// which requires every output to match the input's type space.
bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                         const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                                         const ViewsDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
#if defined(ARMCOMPUTECL_ENABLED)
    // Split along the last dimension, cannot use sub-tensors
    // as width and height of the sub-tensors do not match
    // the width and height of the parent tensor
    // in case of input with more than 2D.
    std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
    if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
        *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
    {
        // NOTE: this macro expands to a 'return' statement, so the sub-tensor
        // checks below are skipped in this case.
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClSplitterWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       outputs,
                                       *splitAxis.begin());
    }
#endif
    for (auto output : outputs)
    {
        if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
        {
            SetValueChecked(reasonIfUnsupported, "Cl Splitter: Types and quantization parameters must match.");
            return false;
        }
    }
    return true;
}
663
keidav01d74dc912018-12-10 18:16:07 +0000664bool ClLayerSupport::IsStridedSliceSupported(const TensorInfo& input,
665 const TensorInfo& output,
666 const StridedSliceDescriptor& descriptor,
667 Optional<std::string&> reasonIfUnsupported) const
668{
669 FORWARD_WORKLOAD_VALIDATE_FUNC(ClStridedSliceWorkloadValidate,
670 reasonIfUnsupported,
671 input,
672 output,
673 descriptor);
674}
675
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100676bool ClLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
677 const TensorInfo& input1,
678 const TensorInfo& output,
679 Optional<std::string&> reasonIfUnsupported) const
680{
681 FORWARD_WORKLOAD_VALIDATE_FUNC(ClSubtractionValidate,
682 reasonIfUnsupported,
683 input0,
684 input1,
685 output);
686}
687
688} // namespace armnn