blob: 85fb03a15703392ab759f69e75f61c4aeda01bc4 [file] [log] [blame]
//
// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

6#include "GpuFsaLayerSupport.hpp"
7
8#include <armnn/Types.hpp>
9#include <armnn/utility/IgnoreUnused.hpp>
10#include <armnn/utility/PolymorphicDowncast.hpp>
11
12#if defined(ARMCOMPUTEGPUFSA_ENABLED)
Teresa Charlin5bda9732024-02-08 18:46:38 +000013#include "layers/GpuFsaBatchMatMul.hpp"
Tracy Narinebc5a5d52024-02-06 15:22:41 +000014#include "layers/GpuFsaCast.hpp"
David Monahanbd738082023-12-08 12:50:02 +000015#include "layers/GpuFsaConvolution2d.hpp"
Tianle Chengfbfa49e2024-01-23 11:21:48 +000016#include "layers/GpuFsaDepthwiseConvolution2d.hpp"
Teresa Charlin20dda372024-02-08 16:23:25 +000017#include "layers/GpuFsaElementwiseBinary.hpp"
Teresa Charlina52bca22024-02-01 17:36:48 +000018#include "layers/GpuFsaPooling2d.hpp"
Teresa Charlin1d6b7312024-02-07 22:02:48 +000019#include "layers/GpuFsaResize.hpp"
David Monahan8a570462023-11-22 13:24:25 +000020#endif
21
22#include <vector>
23
24namespace armnn
25{
26
// Reports whether the GpuFsa backend is available at all.
// When ARMCOMPUTEGPUFSA_ENABLED is defined this trivially returns true and the
// per-layer validate functions do the real work; otherwise it returns false and
// writes a human-readable reason into reasonIfUnsupported (if one was supplied).
template<typename ... Args>
bool IsGpuFsaBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
{
    // Extra arguments are accepted only so call sites can forward their
    // tensor infos unconditionally; they are deliberately unused here.
    IgnoreUnused(reasonIfUnsupported, (args)...);
#if defined(ARMCOMPUTEGPUFSA_ENABLED)
    return true;
#else
    if (reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = "The armnn library has been built without CL support";
    }
    return false;
#endif
}
41
// Evaluates 'expr' only when the GpuFsa backend is compiled in; otherwise falls
// back to IsGpuFsaBackendSupported(), which fills in reasonIfUnsupported.
// NOTE(review): not referenced in the visible part of this translation unit.
#if defined(ARMCOMPUTEGPUFSA_ENABLED)
#define FORWARD_GPUFSA_LAYER_SUPPORT_FUNC(expr) (expr)
#else
#define FORWARD_GPUFSA_LAYER_SUPPORT_FUNC(expr) IsGpuFsaBackendSupported(reasonIfUnsupported)
#endif
47
48#if defined(ARMCOMPUTEGPUFSA_ENABLED)
49template<class FuncType, class... Args>
50inline bool CheckIsLayerSupported(FuncType&& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
51{
52 arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
53 const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
54 if (!supported && reasonIfUnsupported)
55 {
56 reasonIfUnsupported.value() = aclStatus.error_description();
57 }
58 return supported;
59}
60
// Forwards to the layer-specific ACL validate function; note the expansion
// contains a 'return', so each switch case below exits the function here.
#define FORWARD_LAYER_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return CheckIsLayerSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
// Backend compiled out: always report unsupported with an explanatory reason.
#define FORWARD_LAYER_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsGpuFsaBackendSupported(reasonIfUnsupported, __VA_ARGS__);
#endif
67
68bool GpuFsaLayerSupport::IsLayerSupported(const LayerType& type,
69 const std::vector<TensorInfo>& infos,
70 const BaseDescriptor& descriptor,
71 const Optional<LstmInputParamsInfo>& lstmParamsInfo,
72 const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmInputParamsInfo,
73 Optional<std::string&> reasonIfUnsupported) const
74{
75 IgnoreUnused(lstmParamsInfo);
76 IgnoreUnused(quantizedLstmInputParamsInfo);
77
Tracy Narinebc5a5d52024-02-06 15:22:41 +000078 switch (type)
79 {
Teresa Charlin5bda9732024-02-08 18:46:38 +000080 case LayerType::BatchMatMul:
81 {
82 if (infos.size() != 3)
83 {
84 throw InvalidArgumentException("Invalid number of BatchMatMul TensorInfos. "
85 "TensorInfos should be of format: {input0, input1 output}.");
86 }
87
88 auto desc = PolymorphicDowncast<const BatchMatMulDescriptor*>(&descriptor);
89
90 FORWARD_LAYER_VALIDATE_FUNC(GpuFsaBatchMatMulValidate,
91 reasonIfUnsupported,
92 infos[0],
93 infos[1],
94 *desc);
95 }
Tracy Narinebc5a5d52024-02-06 15:22:41 +000096 case LayerType::Cast:
97 {
98 if (infos.size() != 2)
99 {
100 throw InvalidArgumentException("Invalid number of cast TensorInfos. "
101 "TensorInfos should be of format: {input, output}.");
102 }
103 FORWARD_LAYER_VALIDATE_FUNC(GpuFsaCastValidate,
104 reasonIfUnsupported,
105 infos[0],
106 infos[1]);
107 }
David Monahan8a570462023-11-22 13:24:25 +0000108 case LayerType::Convolution2d:
109 {
110 if (infos.size() != 4)
111 {
112 throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
113 "TensorInfos should be of format: {input, output, weights, biases}.");
114 }
115
Tracy Narinee7d27852024-01-26 09:13:19 +0000116 auto desc = PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor);
David Monahan8a570462023-11-22 13:24:25 +0000117 if (infos[3] == TensorInfo())
118 {
119 FORWARD_LAYER_VALIDATE_FUNC(GpuFsaConvolution2dValidate,
120 reasonIfUnsupported,
121 infos[0],
Tracy Narinee7d27852024-01-26 09:13:19 +0000122 *desc,
David Monahan8a570462023-11-22 13:24:25 +0000123 infos[2],
124 EmptyOptional());
125 }
126 else
127 {
128 FORWARD_LAYER_VALIDATE_FUNC(GpuFsaConvolution2dValidate,
129 reasonIfUnsupported,
130 infos[0],
Tracy Narinee7d27852024-01-26 09:13:19 +0000131 *desc,
David Monahan8a570462023-11-22 13:24:25 +0000132 infos[2],
133 infos[3]);
134 }
135 }
Tianle Chengfbfa49e2024-01-23 11:21:48 +0000136 case LayerType::DepthwiseConvolution2d:
137 {
138 if (infos.size() != 4)
139 {
140 throw InvalidArgumentException("Invalid number of DepthwiseConvolution2dDescriptor TensorInfos. "
141 "TensorInfos should be of format: {input, output, weights, biases}.");
142 }
143
Tracy Narinebc5a5d52024-02-06 15:22:41 +0000144 auto desc = PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor);
Tianle Chengfbfa49e2024-01-23 11:21:48 +0000145 if (infos[3] == TensorInfo())
146 {
147 FORWARD_LAYER_VALIDATE_FUNC(GpuFsaDepthwiseConvolution2dValidate,
148 reasonIfUnsupported,
149 infos[0],
Tracy Narinee7d27852024-01-26 09:13:19 +0000150 *desc,
Tianle Chengfbfa49e2024-01-23 11:21:48 +0000151 infos[2],
152 EmptyOptional());
153 }
154 else
155 {
156 FORWARD_LAYER_VALIDATE_FUNC(GpuFsaDepthwiseConvolution2dValidate,
157 reasonIfUnsupported,
158 infos[0],
Tracy Narinee7d27852024-01-26 09:13:19 +0000159 *desc,
Tianle Chengfbfa49e2024-01-23 11:21:48 +0000160 infos[2],
161 infos[3]);
162 }
163 }
Tracy Narinee7d27852024-01-26 09:13:19 +0000164 case LayerType::ElementwiseBinary:
165 {
166 if (infos.size() != 3)
167 {
168 throw InvalidArgumentException("Invalid number of ElementwiseBinary TensorInfos. "
Teresa Charlin20dda372024-02-08 16:23:25 +0000169 "TensorInfos should be of format: {input0, input1, output}.");
Tracy Narinee7d27852024-01-26 09:13:19 +0000170 }
171
Tracy Narinebc5a5d52024-02-06 15:22:41 +0000172 auto desc = PolymorphicDowncast<const ElementwiseBinaryDescriptor*>(&descriptor);
Teresa Charlin20dda372024-02-08 16:23:25 +0000173 FORWARD_LAYER_VALIDATE_FUNC(GpuFsaElementwiseBinaryValidate,
174 reasonIfUnsupported,
175 infos[0],
176 infos[1],
177 *desc);
Teresa Charlina52bca22024-02-01 17:36:48 +0000178 }
179 case LayerType::Pooling2d:
180 {
181 if (infos.size() != 2)
182 {
183 throw InvalidArgumentException("Invalid number of Pooling2d TensorInfos. "
184 "TensorInfos should be of format: {input, output}.");
185 }
186
187 auto desc = PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor);
188
189 FORWARD_LAYER_VALIDATE_FUNC(GpuFsaPooling2dValidate,
190 reasonIfUnsupported,
191 infos[0],
192 *desc);
Tracy Narinee7d27852024-01-26 09:13:19 +0000193 }
Teresa Charlin1d6b7312024-02-07 22:02:48 +0000194 case LayerType::Resize:
195 {
196 if (infos.size() != 2)
197 {
198 throw InvalidArgumentException("Invalid number of Resize TensorInfos. "
199 "TensorInfos should be of format: {input, output}.");
200 }
201
202 auto desc = PolymorphicDowncast<const ResizeDescriptor*>(&descriptor);
203
204 FORWARD_LAYER_VALIDATE_FUNC(GpuFsaResizeValidate,
205 reasonIfUnsupported,
206 infos[0],
207 *desc);
208 }
David Monahan8a570462023-11-22 13:24:25 +0000209 case LayerType::Constant:
210 case LayerType::Input:
211 case LayerType::Output:
212 return IsGpuFsaBackendSupported(reasonIfUnsupported, infos[0]);
213 default:
214 // Layers not supported in the GpuFsa backend.
215 return false;
216 }
217}
218
219} // namespace armnn