blob: 2065998434443c9ef2560d47686d117f2aaaabc5 [file] [log] [blame]
David Monahan8a570462023-11-22 13:24:25 +00001//
David Monahanbd738082023-12-08 12:50:02 +00002// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
David Monahan8a570462023-11-22 13:24:25 +00003// SPDX-License-Identifier: MIT
4//
5
6#include "GpuFsaLayerSupport.hpp"
7
8#include <armnn/Types.hpp>
9#include <armnn/utility/IgnoreUnused.hpp>
10#include <armnn/utility/PolymorphicDowncast.hpp>
11
12#if defined(ARMCOMPUTEGPUFSA_ENABLED)
Teresa Charlinddbda6a2024-02-07 22:58:29 +000013#include "layers/GpuFsaActivation.hpp"
Teresa Charlin5bda9732024-02-08 18:46:38 +000014#include "layers/GpuFsaBatchMatMul.hpp"
Tracy Narinebc5a5d52024-02-06 15:22:41 +000015#include "layers/GpuFsaCast.hpp"
David Monahanbd738082023-12-08 12:50:02 +000016#include "layers/GpuFsaConvolution2d.hpp"
Tianle Chengfbfa49e2024-01-23 11:21:48 +000017#include "layers/GpuFsaDepthwiseConvolution2d.hpp"
Teresa Charlin20dda372024-02-08 16:23:25 +000018#include "layers/GpuFsaElementwiseBinary.hpp"
Teresa Charlina52bca22024-02-01 17:36:48 +000019#include "layers/GpuFsaPooling2d.hpp"
Declan-ARM5e90b832024-02-07 13:07:31 +000020#include "layers/GpuFsaReshape.hpp"
Teresa Charlin1d6b7312024-02-07 22:02:48 +000021#include "layers/GpuFsaResize.hpp"
John Mcloughlin33753902024-02-07 15:00:57 +000022#include "layers/GpuFsaSoftmax.hpp"
David Monahan8a570462023-11-22 13:24:25 +000023#endif
24
25#include <vector>
26
27namespace armnn
28{
29
/// Compile-time gated support check for the GpuFsa backend.
///
/// When the backend is built in (ARMCOMPUTEGPUFSA_ENABLED) this always reports
/// support at this level; the real per-layer validation happens elsewhere.
/// When the backend is compiled out, it reports failure and, if the caller
/// supplied an output slot, writes the reason string into it.
///
/// @param reasonIfUnsupported Optional out-slot for a human-readable failure reason.
/// @param args                Arbitrary extra arguments; accepted so callers can pass
///                            tensor infos uniformly, but intentionally unused here.
/// @return true iff the library was built with GpuFsa (CL) support.
template<typename ... Args>
bool IsGpuFsaBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
{
    // Silence unused-parameter warnings in the ARMCOMPUTEGPUFSA_ENABLED build,
    // where neither the reason slot nor the extra args are inspected.
    IgnoreUnused(reasonIfUnsupported, (args)...);
#if defined(ARMCOMPUTEGPUFSA_ENABLED)
    return true;
#else
    if (reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = "The armnn library has been built without CL support";
    }
    return false;
#endif
}
44
45#if defined(ARMCOMPUTEGPUFSA_ENABLED)
46#define FORWARD_GPUFSA_LAYER_SUPPORT_FUNC(expr) (expr)
47#else
48#define FORWARD_GPUFSA_LAYER_SUPPORT_FUNC(expr) IsGpuFsaBackendSupported(reasonIfUnsupported)
49#endif
50
51#if defined(ARMCOMPUTEGPUFSA_ENABLED)
52template<class FuncType, class... Args>
53inline bool CheckIsLayerSupported(FuncType&& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
54{
55 arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
56 const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
57 if (!supported && reasonIfUnsupported)
58 {
59 reasonIfUnsupported.value() = aclStatus.error_description();
60 }
61 return supported;
62}
63
64#define FORWARD_LAYER_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
65 return CheckIsLayerSupported(func, reasonIfUnsupported, __VA_ARGS__);
66#else
67#define FORWARD_LAYER_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
68 return IsGpuFsaBackendSupported(reasonIfUnsupported, __VA_ARGS__);
69#endif
70
71bool GpuFsaLayerSupport::IsLayerSupported(const LayerType& type,
72 const std::vector<TensorInfo>& infos,
73 const BaseDescriptor& descriptor,
74 const Optional<LstmInputParamsInfo>& lstmParamsInfo,
75 const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmInputParamsInfo,
76 Optional<std::string&> reasonIfUnsupported) const
77{
78 IgnoreUnused(lstmParamsInfo);
79 IgnoreUnused(quantizedLstmInputParamsInfo);
80
Tracy Narinebc5a5d52024-02-06 15:22:41 +000081 switch (type)
82 {
Teresa Charlinddbda6a2024-02-07 22:58:29 +000083 case LayerType::Activation:
84 {
85 if (infos.size() != 2)
86 {
87 throw InvalidArgumentException("Invalid number of Activation TensorInfos. "
88 "TensorInfos should be of format: {input, output}.");
89 }
90
91 auto desc = PolymorphicDowncast<const ActivationDescriptor*>(&descriptor);
92 FORWARD_LAYER_VALIDATE_FUNC(GpuFsaActivationValidate,
93 reasonIfUnsupported,
94 infos[0],
95 *desc);
96 }
Teresa Charlin5bda9732024-02-08 18:46:38 +000097 case LayerType::BatchMatMul:
98 {
99 if (infos.size() != 3)
100 {
101 throw InvalidArgumentException("Invalid number of BatchMatMul TensorInfos. "
102 "TensorInfos should be of format: {input0, input1 output}.");
103 }
104
105 auto desc = PolymorphicDowncast<const BatchMatMulDescriptor*>(&descriptor);
Teresa Charlin5bda9732024-02-08 18:46:38 +0000106 FORWARD_LAYER_VALIDATE_FUNC(GpuFsaBatchMatMulValidate,
107 reasonIfUnsupported,
108 infos[0],
109 infos[1],
110 *desc);
111 }
Tracy Narinebc5a5d52024-02-06 15:22:41 +0000112 case LayerType::Cast:
113 {
114 if (infos.size() != 2)
115 {
116 throw InvalidArgumentException("Invalid number of cast TensorInfos. "
117 "TensorInfos should be of format: {input, output}.");
118 }
Teresa Charlinddbda6a2024-02-07 22:58:29 +0000119
Tracy Narinebc5a5d52024-02-06 15:22:41 +0000120 FORWARD_LAYER_VALIDATE_FUNC(GpuFsaCastValidate,
121 reasonIfUnsupported,
122 infos[0],
123 infos[1]);
124 }
David Monahan8a570462023-11-22 13:24:25 +0000125 case LayerType::Convolution2d:
126 {
127 if (infos.size() != 4)
128 {
129 throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
130 "TensorInfos should be of format: {input, output, weights, biases}.");
131 }
132
Tracy Narinee7d27852024-01-26 09:13:19 +0000133 auto desc = PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor);
David Monahan8a570462023-11-22 13:24:25 +0000134 if (infos[3] == TensorInfo())
135 {
136 FORWARD_LAYER_VALIDATE_FUNC(GpuFsaConvolution2dValidate,
137 reasonIfUnsupported,
138 infos[0],
Tracy Narinee7d27852024-01-26 09:13:19 +0000139 *desc,
David Monahan8a570462023-11-22 13:24:25 +0000140 infos[2],
141 EmptyOptional());
142 }
143 else
144 {
145 FORWARD_LAYER_VALIDATE_FUNC(GpuFsaConvolution2dValidate,
146 reasonIfUnsupported,
147 infos[0],
Tracy Narinee7d27852024-01-26 09:13:19 +0000148 *desc,
David Monahan8a570462023-11-22 13:24:25 +0000149 infos[2],
150 infos[3]);
151 }
152 }
Tianle Chengfbfa49e2024-01-23 11:21:48 +0000153 case LayerType::DepthwiseConvolution2d:
154 {
155 if (infos.size() != 4)
156 {
157 throw InvalidArgumentException("Invalid number of DepthwiseConvolution2dDescriptor TensorInfos. "
158 "TensorInfos should be of format: {input, output, weights, biases}.");
159 }
160
Tracy Narinebc5a5d52024-02-06 15:22:41 +0000161 auto desc = PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor);
Tianle Chengfbfa49e2024-01-23 11:21:48 +0000162 if (infos[3] == TensorInfo())
163 {
164 FORWARD_LAYER_VALIDATE_FUNC(GpuFsaDepthwiseConvolution2dValidate,
165 reasonIfUnsupported,
166 infos[0],
Tracy Narinee7d27852024-01-26 09:13:19 +0000167 *desc,
Tianle Chengfbfa49e2024-01-23 11:21:48 +0000168 infos[2],
169 EmptyOptional());
170 }
171 else
172 {
173 FORWARD_LAYER_VALIDATE_FUNC(GpuFsaDepthwiseConvolution2dValidate,
174 reasonIfUnsupported,
175 infos[0],
Tracy Narinee7d27852024-01-26 09:13:19 +0000176 *desc,
Tianle Chengfbfa49e2024-01-23 11:21:48 +0000177 infos[2],
178 infos[3]);
179 }
180 }
Tracy Narinee7d27852024-01-26 09:13:19 +0000181 case LayerType::ElementwiseBinary:
182 {
183 if (infos.size() != 3)
184 {
185 throw InvalidArgumentException("Invalid number of ElementwiseBinary TensorInfos. "
Teresa Charlin20dda372024-02-08 16:23:25 +0000186 "TensorInfos should be of format: {input0, input1, output}.");
Tracy Narinee7d27852024-01-26 09:13:19 +0000187 }
188
Tracy Narinebc5a5d52024-02-06 15:22:41 +0000189 auto desc = PolymorphicDowncast<const ElementwiseBinaryDescriptor*>(&descriptor);
Teresa Charlin20dda372024-02-08 16:23:25 +0000190 FORWARD_LAYER_VALIDATE_FUNC(GpuFsaElementwiseBinaryValidate,
191 reasonIfUnsupported,
192 infos[0],
193 infos[1],
194 *desc);
Teresa Charlina52bca22024-02-01 17:36:48 +0000195 }
196 case LayerType::Pooling2d:
197 {
198 if (infos.size() != 2)
199 {
200 throw InvalidArgumentException("Invalid number of Pooling2d TensorInfos. "
201 "TensorInfos should be of format: {input, output}.");
202 }
203
204 auto desc = PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor);
Teresa Charlina52bca22024-02-01 17:36:48 +0000205 FORWARD_LAYER_VALIDATE_FUNC(GpuFsaPooling2dValidate,
206 reasonIfUnsupported,
207 infos[0],
208 *desc);
Tracy Narinee7d27852024-01-26 09:13:19 +0000209 }
Declan-ARM5e90b832024-02-07 13:07:31 +0000210 case LayerType::Reshape:
211 {
212 if (infos.size() != 2)
213 {
214 throw InvalidArgumentException("Invalid number of Reshape TensorInfos. "
215 "TensorInfos should be of format: { input, output }.");
216 }
217
218 auto desc = PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor);
219
220 FORWARD_LAYER_VALIDATE_FUNC(GpuFsaReshapeValidate,
221 reasonIfUnsupported,
222 infos[0],
223 *desc);
224 }
Teresa Charlin1d6b7312024-02-07 22:02:48 +0000225 case LayerType::Resize:
226 {
227 if (infos.size() != 2)
228 {
229 throw InvalidArgumentException("Invalid number of Resize TensorInfos. "
230 "TensorInfos should be of format: {input, output}.");
231 }
232
233 auto desc = PolymorphicDowncast<const ResizeDescriptor*>(&descriptor);
Teresa Charlin1d6b7312024-02-07 22:02:48 +0000234 FORWARD_LAYER_VALIDATE_FUNC(GpuFsaResizeValidate,
235 reasonIfUnsupported,
236 infos[0],
237 *desc);
238 }
John Mcloughlin33753902024-02-07 15:00:57 +0000239 case LayerType::Softmax:
240 {
241 if (infos.size() != 2)
242 {
243 throw InvalidArgumentException("Invalid number of Softmax TensorInfos. "
244 "TensorInfos should be of format: {input, output}.");
245 }
246
247 auto desc = PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor);
248 FORWARD_LAYER_VALIDATE_FUNC(GpuFsaSoftmaxValidate,
249 reasonIfUnsupported,
250 infos[0],
251 infos[1],
252 *desc);
253 }
David Monahan8a570462023-11-22 13:24:25 +0000254 case LayerType::Constant:
255 case LayerType::Input:
256 case LayerType::Output:
257 return IsGpuFsaBackendSupported(reasonIfUnsupported, infos[0]);
258 default:
259 // Layers not supported in the GpuFsa backend.
260 return false;
261 }
262}
263
264} // namespace armnn