blob: 98fb4300b8467d3f2a66f884aded3e5c16d474db [file] [log] [blame]
David Monahan8a570462023-11-22 13:24:25 +00001//
David Monahanbd738082023-12-08 12:50:02 +00002// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
David Monahan8a570462023-11-22 13:24:25 +00003// SPDX-License-Identifier: MIT
4//
5
6#include "GpuFsaLayerSupport.hpp"
7
8#include <armnn/Types.hpp>
9#include <armnn/utility/IgnoreUnused.hpp>
10#include <armnn/utility/PolymorphicDowncast.hpp>
11
12#if defined(ARMCOMPUTEGPUFSA_ENABLED)
Teresa Charlin5bda9732024-02-08 18:46:38 +000013#include "layers/GpuFsaBatchMatMul.hpp"
Tracy Narinebc5a5d52024-02-06 15:22:41 +000014#include "layers/GpuFsaCast.hpp"
David Monahanbd738082023-12-08 12:50:02 +000015#include "layers/GpuFsaConvolution2d.hpp"
Tianle Chengfbfa49e2024-01-23 11:21:48 +000016#include "layers/GpuFsaDepthwiseConvolution2d.hpp"
Teresa Charlin20dda372024-02-08 16:23:25 +000017#include "layers/GpuFsaElementwiseBinary.hpp"
Teresa Charlina52bca22024-02-01 17:36:48 +000018#include "layers/GpuFsaPooling2d.hpp"
Teresa Charlin1d6b7312024-02-07 22:02:48 +000019#include "layers/GpuFsaResize.hpp"
John Mcloughlin33753902024-02-07 15:00:57 +000020#include "layers/GpuFsaSoftmax.hpp"
David Monahan8a570462023-11-22 13:24:25 +000021#endif
22
23#include <vector>
24
25namespace armnn
26{
27
28template<typename ... Args>
29bool IsGpuFsaBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
30{
31 IgnoreUnused(reasonIfUnsupported, (args)...);
32#if defined(ARMCOMPUTEGPUFSA_ENABLED)
33 return true;
34#else
35 if (reasonIfUnsupported)
36 {
37 reasonIfUnsupported.value() = "The armnn library has been built without CL support";
38 }
39 return false;
40#endif
41}
42
// Evaluates `expr` only when the GpuFsa backend is compiled in; otherwise it
// ignores `expr` and falls back to IsGpuFsaBackendSupported, which fills in
// reasonIfUnsupported and returns false. The fallback expansion relies on a
// variable named `reasonIfUnsupported` being in scope at the expansion site.
// NOTE(review): this macro is not referenced anywhere in this file (only
// FORWARD_LAYER_VALIDATE_FUNC is used below) — looks like dead code; confirm
// no other translation unit includes this .cpp before removing.
#if defined(ARMCOMPUTEGPUFSA_ENABLED)
#define FORWARD_GPUFSA_LAYER_SUPPORT_FUNC(expr) (expr)
#else
#define FORWARD_GPUFSA_LAYER_SUPPORT_FUNC(expr) IsGpuFsaBackendSupported(reasonIfUnsupported)
#endif
48
#if defined(ARMCOMPUTEGPUFSA_ENABLED)
/// Runs an ACL validation function with the forwarded arguments and converts
/// its arm_compute::Status into a bool. On failure the ACL error description
/// is copied into reasonIfUnsupported (when one was supplied).
template<class FuncType, class... Args>
inline bool CheckIsLayerSupported(FuncType&& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
    const arm_compute::Status status = func(std::forward<Args>(args)...);
    if (status.error_code() == arm_compute::ErrorCode::OK)
    {
        return true;
    }
    if (reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = status.error_description();
    }
    return false;
}

// Expands to a `return`: every use site exits its enclosing function.
#define FORWARD_LAYER_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return CheckIsLayerSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
// Without the backend, validation degenerates to the compiled-out fallback.
#define FORWARD_LAYER_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsGpuFsaBackendSupported(reasonIfUnsupported, __VA_ARGS__);
#endif
68
69bool GpuFsaLayerSupport::IsLayerSupported(const LayerType& type,
70 const std::vector<TensorInfo>& infos,
71 const BaseDescriptor& descriptor,
72 const Optional<LstmInputParamsInfo>& lstmParamsInfo,
73 const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmInputParamsInfo,
74 Optional<std::string&> reasonIfUnsupported) const
75{
76 IgnoreUnused(lstmParamsInfo);
77 IgnoreUnused(quantizedLstmInputParamsInfo);
78
Tracy Narinebc5a5d52024-02-06 15:22:41 +000079 switch (type)
80 {
Teresa Charlin5bda9732024-02-08 18:46:38 +000081 case LayerType::BatchMatMul:
82 {
83 if (infos.size() != 3)
84 {
85 throw InvalidArgumentException("Invalid number of BatchMatMul TensorInfos. "
86 "TensorInfos should be of format: {input0, input1 output}.");
87 }
88
89 auto desc = PolymorphicDowncast<const BatchMatMulDescriptor*>(&descriptor);
90
91 FORWARD_LAYER_VALIDATE_FUNC(GpuFsaBatchMatMulValidate,
92 reasonIfUnsupported,
93 infos[0],
94 infos[1],
95 *desc);
96 }
Tracy Narinebc5a5d52024-02-06 15:22:41 +000097 case LayerType::Cast:
98 {
99 if (infos.size() != 2)
100 {
101 throw InvalidArgumentException("Invalid number of cast TensorInfos. "
102 "TensorInfos should be of format: {input, output}.");
103 }
104 FORWARD_LAYER_VALIDATE_FUNC(GpuFsaCastValidate,
105 reasonIfUnsupported,
106 infos[0],
107 infos[1]);
108 }
David Monahan8a570462023-11-22 13:24:25 +0000109 case LayerType::Convolution2d:
110 {
111 if (infos.size() != 4)
112 {
113 throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
114 "TensorInfos should be of format: {input, output, weights, biases}.");
115 }
116
Tracy Narinee7d27852024-01-26 09:13:19 +0000117 auto desc = PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor);
David Monahan8a570462023-11-22 13:24:25 +0000118 if (infos[3] == TensorInfo())
119 {
120 FORWARD_LAYER_VALIDATE_FUNC(GpuFsaConvolution2dValidate,
121 reasonIfUnsupported,
122 infos[0],
Tracy Narinee7d27852024-01-26 09:13:19 +0000123 *desc,
David Monahan8a570462023-11-22 13:24:25 +0000124 infos[2],
125 EmptyOptional());
126 }
127 else
128 {
129 FORWARD_LAYER_VALIDATE_FUNC(GpuFsaConvolution2dValidate,
130 reasonIfUnsupported,
131 infos[0],
Tracy Narinee7d27852024-01-26 09:13:19 +0000132 *desc,
David Monahan8a570462023-11-22 13:24:25 +0000133 infos[2],
134 infos[3]);
135 }
136 }
Tianle Chengfbfa49e2024-01-23 11:21:48 +0000137 case LayerType::DepthwiseConvolution2d:
138 {
139 if (infos.size() != 4)
140 {
141 throw InvalidArgumentException("Invalid number of DepthwiseConvolution2dDescriptor TensorInfos. "
142 "TensorInfos should be of format: {input, output, weights, biases}.");
143 }
144
Tracy Narinebc5a5d52024-02-06 15:22:41 +0000145 auto desc = PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor);
Tianle Chengfbfa49e2024-01-23 11:21:48 +0000146 if (infos[3] == TensorInfo())
147 {
148 FORWARD_LAYER_VALIDATE_FUNC(GpuFsaDepthwiseConvolution2dValidate,
149 reasonIfUnsupported,
150 infos[0],
Tracy Narinee7d27852024-01-26 09:13:19 +0000151 *desc,
Tianle Chengfbfa49e2024-01-23 11:21:48 +0000152 infos[2],
153 EmptyOptional());
154 }
155 else
156 {
157 FORWARD_LAYER_VALIDATE_FUNC(GpuFsaDepthwiseConvolution2dValidate,
158 reasonIfUnsupported,
159 infos[0],
Tracy Narinee7d27852024-01-26 09:13:19 +0000160 *desc,
Tianle Chengfbfa49e2024-01-23 11:21:48 +0000161 infos[2],
162 infos[3]);
163 }
164 }
Tracy Narinee7d27852024-01-26 09:13:19 +0000165 case LayerType::ElementwiseBinary:
166 {
167 if (infos.size() != 3)
168 {
169 throw InvalidArgumentException("Invalid number of ElementwiseBinary TensorInfos. "
Teresa Charlin20dda372024-02-08 16:23:25 +0000170 "TensorInfos should be of format: {input0, input1, output}.");
Tracy Narinee7d27852024-01-26 09:13:19 +0000171 }
172
Tracy Narinebc5a5d52024-02-06 15:22:41 +0000173 auto desc = PolymorphicDowncast<const ElementwiseBinaryDescriptor*>(&descriptor);
Teresa Charlin20dda372024-02-08 16:23:25 +0000174 FORWARD_LAYER_VALIDATE_FUNC(GpuFsaElementwiseBinaryValidate,
175 reasonIfUnsupported,
176 infos[0],
177 infos[1],
178 *desc);
Teresa Charlina52bca22024-02-01 17:36:48 +0000179 }
180 case LayerType::Pooling2d:
181 {
182 if (infos.size() != 2)
183 {
184 throw InvalidArgumentException("Invalid number of Pooling2d TensorInfos. "
185 "TensorInfos should be of format: {input, output}.");
186 }
187
188 auto desc = PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor);
189
190 FORWARD_LAYER_VALIDATE_FUNC(GpuFsaPooling2dValidate,
191 reasonIfUnsupported,
192 infos[0],
193 *desc);
Tracy Narinee7d27852024-01-26 09:13:19 +0000194 }
Teresa Charlin1d6b7312024-02-07 22:02:48 +0000195 case LayerType::Resize:
196 {
197 if (infos.size() != 2)
198 {
199 throw InvalidArgumentException("Invalid number of Resize TensorInfos. "
200 "TensorInfos should be of format: {input, output}.");
201 }
202
203 auto desc = PolymorphicDowncast<const ResizeDescriptor*>(&descriptor);
204
205 FORWARD_LAYER_VALIDATE_FUNC(GpuFsaResizeValidate,
206 reasonIfUnsupported,
207 infos[0],
208 *desc);
209 }
John Mcloughlin33753902024-02-07 15:00:57 +0000210 case LayerType::Softmax:
211 {
212 if (infos.size() != 2)
213 {
214 throw InvalidArgumentException("Invalid number of Softmax TensorInfos. "
215 "TensorInfos should be of format: {input, output}.");
216 }
217
218 auto desc = PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor);
219 FORWARD_LAYER_VALIDATE_FUNC(GpuFsaSoftmaxValidate,
220 reasonIfUnsupported,
221 infos[0],
222 infos[1],
223 *desc);
224 }
David Monahan8a570462023-11-22 13:24:25 +0000225 case LayerType::Constant:
226 case LayerType::Input:
227 case LayerType::Output:
228 return IsGpuFsaBackendSupported(reasonIfUnsupported, infos[0]);
229 default:
230 // Layers not supported in the GpuFsa backend.
231 return false;
232 }
233}
234
235} // namespace armnn