//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "armnn/LayerSupport.hpp"

#include "backends/RefLayerSupport.hpp"
#include "backends/NeonLayerSupport.hpp"
#include "backends/ClLayerSupport.hpp"

#include <boost/assert.hpp>

#include <cstring>
#include <algorithm>

namespace armnn
{

/// Helper function to copy a full string to a truncated version.
void CopyErrorMessage(char* truncatedString, const char* fullString, size_t maxLength)
{
    if(truncatedString != nullptr)
    {
        size_t copyLength = std::min(maxLength, strlen(fullString));
        std::strncpy(truncatedString, fullString, copyLength);
        // Ensure null-terminated string.
        truncatedString[copyLength] = '\0';
    }
}
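
// Illustrative usage (a sketch, not part of the original file): the destination buffer is assumed
// to provide maxLength characters plus one extra byte for the terminator, e.g.
//   char buffer[16];
//   CopyErrorMessage(buffer, "a rather long error message", sizeof(buffer) - 1);
// leaves buffer holding the first 15 characters of the message, null-terminated.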

// Helper macro to avoid code duplication.
// Forwards function func to funcRef, funcNeon or funcCl, depending on the value of compute.
#define FORWARD_LAYER_SUPPORT_FUNC(compute, func, ...) \
    std::string reasonIfUnsupportedFull; \
    bool isSupported; \
    switch(compute) \
    { \
        case Compute::CpuRef: \
            isSupported = func##Ref(__VA_ARGS__, &reasonIfUnsupportedFull); \
            break; \
        case Compute::CpuAcc: \
            isSupported = func##Neon(__VA_ARGS__, &reasonIfUnsupportedFull); \
            break; \
        case Compute::GpuAcc: \
            isSupported = func##Cl(__VA_ARGS__, &reasonIfUnsupportedFull); \
            break; \
        default: \
            isSupported = func##Ref(__VA_ARGS__, &reasonIfUnsupportedFull); \
            break; \
    } \
    CopyErrorMessage(reasonIfUnsupported, reasonIfUnsupportedFull.c_str(), reasonIfUnsupportedMaxLength); \
    return isSupported;
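
// Illustrative expansion (not part of the original file): a call such as
//   FORWARD_LAYER_SUPPORT_FUNC(compute, IsActivationSupported, input, output, descriptor);
// dispatches Compute::CpuRef to IsActivationSupportedRef(input, output, descriptor, &reasonIfUnsupportedFull),
// Compute::CpuAcc to IsActivationSupportedNeon(...) and Compute::GpuAcc to IsActivationSupportedCl(...),
// with unrecognised compute values falling back to the reference implementation. The macro assumes the
// enclosing function declares reasonIfUnsupported and reasonIfUnsupportedMaxLength parameters.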

bool CheckTensorDataTypesEqual(const TensorInfo& input0, const TensorInfo& input1)
{
    return input0.GetDataType() == input1.GetDataType();
}

bool IsActivationSupported(Compute compute,
                           const TensorInfo& input,
                           const TensorInfo& output,
                           const ActivationDescriptor& descriptor,
                           char* reasonIfUnsupported,
                           size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsActivationSupported, input, output, descriptor);
}
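
// A minimal caller-side sketch (assumed, not part of the original file): a client would typically
// query support before adding the corresponding layer, e.g.
//   char reason[256];
//   bool supported = armnn::IsActivationSupported(armnn::Compute::CpuRef, inputInfo, outputInfo,
//                                                 activationDescriptor, reason, sizeof(reason) - 1);
// where inputInfo, outputInfo and activationDescriptor are assumed to be set up by the caller, and
// reason receives a (possibly truncated) explanation when the query reports the layer as unsupported.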

bool IsAdditionSupported(Compute compute,
                         const TensorInfo& input0,
                         const TensorInfo& input1,
                         const TensorInfo& output,
                         char* reasonIfUnsupported,
                         size_t reasonIfUnsupportedMaxLength)
{
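    // Addition is only considered supported when both inputs share the same data type,
    // regardless of the chosen compute device.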
    if(!CheckTensorDataTypesEqual(input0, input1))
    {
        return false;
    }

    FORWARD_LAYER_SUPPORT_FUNC(compute, IsAdditionSupported, input0, input1, output);
}

bool IsBatchNormalizationSupported(Compute compute,
                                   const TensorInfo& input,
                                   const TensorInfo& output,
                                   const TensorInfo& mean,
                                   const TensorInfo& var,
                                   const TensorInfo& beta,
                                   const TensorInfo& gamma,
                                   const BatchNormalizationDescriptor& descriptor,
                                   char* reasonIfUnsupported,
                                   size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute,
                               IsBatchNormalizationSupported,
                               input,
                               output,
                               mean,
                               var,
                               beta,
                               gamma,
                               descriptor);
}

bool IsConstantSupported(Compute compute,
                         const TensorInfo& output,
                         char* reasonIfUnsupported,
                         size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsConstantSupported, output);
}

bool IsConvertFp16ToFp32Supported(Compute compute,
                                  const TensorInfo& input,
                                  const TensorInfo& output,
                                  char* reasonIfUnsupported,
                                  size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsConvertFp16ToFp32Supported, input, output);
}

bool IsConvertFp32ToFp16Supported(Compute compute,
                                  const TensorInfo& input,
                                  const TensorInfo& output,
                                  char* reasonIfUnsupported,
                                  size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsConvertFp32ToFp16Supported, input, output);
}

bool IsConvolution2dSupported(Compute compute,
                              const TensorInfo& input,
                              const TensorInfo& output,
                              const Convolution2dDescriptor& descriptor,
                              const TensorInfo& weights,
                              const boost::optional<TensorInfo>& biases,
                              char* reasonIfUnsupported,
                              size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsConvolution2dSupported, input, output, descriptor, weights, biases);
}

bool IsDivisionSupported(Compute compute,
                         const TensorInfo& input0,
                         const TensorInfo& input1,
                         const TensorInfo& output,
                         char* reasonIfUnsupported,
                         size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsDivisionSupported, input0, input1, output);
}

bool IsDepthwiseConvolutionSupported(Compute compute,
                                     const TensorInfo& input,
                                     const TensorInfo& output,
                                     const DepthwiseConvolution2dDescriptor& descriptor,
                                     const TensorInfo& weights,
                                     const boost::optional<TensorInfo>& biases,
                                     char* reasonIfUnsupported,
                                     size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsDepthwiseConvolutionSupported, input, output, descriptor, weights, biases);
}

bool IsInputSupported(Compute compute,
                      const TensorInfo& input,
                      char* reasonIfUnsupported,
                      size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsInputSupported, input);
}

bool IsFullyConnectedSupported(Compute compute,
                               const TensorInfo& input,
                               const TensorInfo& output,
                               const TensorInfo& weights,
                               const TensorInfo& biases,
                               const FullyConnectedDescriptor& descriptor,
                               char* reasonIfUnsupported,
                               size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsFullyConnectedSupported, input, output, weights, biases, descriptor);
}

bool IsL2NormalizationSupported(Compute compute,
                                const TensorInfo& input,
                                const TensorInfo& output,
                                char* reasonIfUnsupported,
                                size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsL2NormalizationSupported, input, output);
}

bool IsLstmSupported(Compute compute, const TensorInfo& input, const TensorInfo& outputStateIn,
                     const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
                     const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
                     const TensorInfo& output, const LstmDescriptor& descriptor,
                     const TensorInfo& inputToForgetWeights, const TensorInfo& inputToCellWeights,
                     const TensorInfo& inputToOutputWeights, const TensorInfo& recurrentToForgetWeights,
                     const TensorInfo& recurrentToCellWeights, const TensorInfo& recurrentToOutputWeights,
                     const TensorInfo& forgetGateBias, const TensorInfo& cellBias,
                     const TensorInfo& outputGateBias, const TensorInfo* inputToInputWeights,
                     const TensorInfo* recurrentToInputWeights, const TensorInfo* cellToInputWeights,
                     const TensorInfo* inputGateBias, const TensorInfo* projectionWeights,
                     const TensorInfo* projectionBias, const TensorInfo* cellToForgetWeights,
                     const TensorInfo* cellToOutputWeights, char* reasonIfUnsupported,
                     size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsLstmSupported, input, outputStateIn, cellStateIn,
                               scratchBuffer, outputStateOut, cellStateOut,
                               output, descriptor, inputToForgetWeights, inputToCellWeights,
                               inputToOutputWeights, recurrentToForgetWeights,
                               recurrentToCellWeights, recurrentToOutputWeights,
                               forgetGateBias, cellBias, outputGateBias,
                               inputToInputWeights, recurrentToInputWeights,
                               cellToInputWeights, inputGateBias, projectionWeights,
                               projectionBias, cellToForgetWeights, cellToOutputWeights);
}

bool IsMergerSupported(Compute compute,
                       std::vector<const TensorInfo*> inputs,
                       const OriginsDescriptor& descriptor,
                       char* reasonIfUnsupported,
                       size_t reasonIfUnsupportedMaxLength)
{
    BOOST_ASSERT(inputs.size() > 0);
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsMergerSupported, inputs, descriptor);
}

bool IsMultiplicationSupported(Compute compute,
                               const TensorInfo& input0,
                               const TensorInfo& input1,
                               const TensorInfo& output,
                               char* reasonIfUnsupported,
                               size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsMultiplicationSupported, input0, input1, output);
}

bool IsNormalizationSupported(Compute compute,
                              const TensorInfo& input,
                              const TensorInfo& output,
                              const NormalizationDescriptor& descriptor,
                              char* reasonIfUnsupported,
                              size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsNormalizationSupported, input, output, descriptor);
}

bool IsOutputSupported(Compute compute,
                       const TensorInfo& output,
                       char* reasonIfUnsupported,
                       size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsOutputSupported, output);
}

bool IsPermuteSupported(Compute compute,
                        const TensorInfo& input,
                        const TensorInfo& output,
                        const PermuteDescriptor& descriptor,
                        char* reasonIfUnsupported,
                        size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsPermuteSupported, input, output, descriptor);
}

bool IsPooling2dSupported(Compute compute,
                          const TensorInfo& input,
                          const TensorInfo& output,
                          const Pooling2dDescriptor& descriptor,
                          char* reasonIfUnsupported,
                          size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsPooling2dSupported, input, output, descriptor);
}

bool IsResizeBilinearSupported(Compute compute,
                               const TensorInfo& input,
                               char* reasonIfUnsupported,
                               size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsResizeBilinearSupported, input);
}

bool IsSoftmaxSupported(Compute compute,
                        const TensorInfo& input,
                        const TensorInfo& output,
                        const SoftmaxDescriptor& descriptor,
                        char* reasonIfUnsupported,
                        size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsSoftmaxSupported, input, output, descriptor);
}

bool IsSplitterSupported(Compute compute,
                         const TensorInfo& input,
                         const ViewsDescriptor& descriptor,
                         char* reasonIfUnsupported,
                         size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsSplitterSupported, input, descriptor);
}

bool IsFakeQuantizationSupported(Compute compute,
                                 const TensorInfo& input,
                                 const FakeQuantizationDescriptor& descriptor,
                                 char* reasonIfUnsupported,
                                 size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsFakeQuantizationSupported, input, descriptor);
}

bool IsReshapeSupported(Compute compute,
                        const TensorInfo& input,
                        char* reasonIfUnsupported,
                        size_t reasonIfUnsupportedMaxLength)
{
    FORWARD_LAYER_SUPPORT_FUNC(compute, IsReshapeSupported, input);
}

bool IsFloorSupported(Compute compute,
                      const TensorInfo& input,
                      const TensorInfo& output,
                      char* reasonIfUnsupported,
                      size_t reasonIfUnsupportedMaxLength)
{
    // By definition (that is, regardless of compute device), shapes and data type must match.
    if (input.GetShape() != output.GetShape() || input.GetDataType() != output.GetDataType())
    {
        return false;
    }

    FORWARD_LAYER_SUPPORT_FUNC(compute, IsFloorSupported, input, output);
}

}