//
// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <Layer.hpp>

#include <backendsCommon/MemImportWorkload.hpp>
#include <backendsCommon/MakeWorkloadHelper.hpp>
#include <armnn/backends/MemCopyWorkload.hpp>
#include <armnn/backends/TensorHandle.hpp>

#include "RefWorkloadFactory.hpp"
#include "RefBackendId.hpp"
#include "RefTensorHandle.hpp"
#include "workloads/RefWorkloads.hpp"

#include <algorithm> // std::find_if used in IsDataType below

namespace armnn
{

namespace
{
static const BackendId s_Id{RefBackendId()};
}
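
// Helper: creates either the Float32 or the Uint8 specialisation of a workload for the given
// descriptor; all other data types map to NullWorkload.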
template <typename F32Workload, typename U8Workload, typename QueueDescriptorType>
std::unique_ptr<IWorkload> RefWorkloadFactory::MakeWorkload(const QueueDescriptorType& descriptor,
                                                            const WorkloadInfo& info) const
{
    return MakeWorkloadHelper<NullWorkload, F32Workload, U8Workload, NullWorkload, NullWorkload, NullWorkload>
           (descriptor, info);
}

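// Returns true if any input or output tensor in the WorkloadInfo has the data type ArmnnType.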
template <DataType ArmnnType>
bool IsDataType(const WorkloadInfo& info)
{
    auto checkType = [](const TensorInfo& tensorInfo) {return tensorInfo.GetDataType() == ArmnnType;};
    auto it = std::find_if(std::begin(info.m_InputTensorInfos), std::end(info.m_InputTensorInfos), checkType);
    if (it != std::end(info.m_InputTensorInfos))
    {
        return true;
    }
    it = std::find_if(std::begin(info.m_OutputTensorInfos), std::end(info.m_OutputTensorInfos), checkType);
    if (it != std::end(info.m_OutputTensorInfos))
    {
        return true;
    }
    return false;
}
bool IsSigned32(const WorkloadInfo& info)
{
    return IsDataType<DataType::Signed32>(info);
}
bool IsBFloat16(const WorkloadInfo& info)
{
    return IsDataType<DataType::BFloat16>(info);
}
bool IsFloat16(const WorkloadInfo& info)
{
    return IsDataType<DataType::Float16>(info);
}
bool IsQSymmS16(const WorkloadInfo& info)
{
    return IsDataType<DataType::QSymmS16>(info);
}
bool IsQSymmS8(const WorkloadInfo& info)
{
    return IsDataType<DataType::QSymmS8>(info);
}
bool IsQAsymmS8(const WorkloadInfo& info)
{
    return IsDataType<DataType::QAsymmS8>(info);
}
bool IsQAsymmU8(const WorkloadInfo& info)
{
    return IsDataType<DataType::QAsymmU8>(info);
}

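// The factory can either share a RefMemoryManager supplied by the caller or, with the default
// constructor, create its own.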
RefWorkloadFactory::RefWorkloadFactory(const std::shared_ptr<RefMemoryManager>& memoryManager)
    : m_MemoryManager(memoryManager)
{
}

RefWorkloadFactory::RefWorkloadFactory()
    : m_MemoryManager(new RefMemoryManager())
{
}

const BackendId& RefWorkloadFactory::GetBackendId() const
{
    return s_Id;
}

bool RefWorkloadFactory::IsLayerSupported(const Layer& layer,
                                          Optional<DataType> dataType,
                                          std::string& outReasonIfUnsupported)
{
    return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported);
}

bool RefWorkloadFactory::IsLayerSupported(const IConnectableLayer& layer,
                                          Optional<DataType> dataType,
                                          std::string& outReasonIfUnsupported,
                                          const ModelOptions& modelOptions)
{
    return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported, modelOptions);
}

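// Both CreateTensorHandle overloads return a RefTensorHandle. When isMemoryManaged is true the
// handle allocates through the factory's RefMemoryManager; otherwise it is constructed without it.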
std::unique_ptr<ITensorHandle> RefWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                      const bool isMemoryManaged) const
{
    if (isMemoryManaged)
    {
        return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
    }
    else
    {
        return std::make_unique<RefTensorHandle>(tensorInfo);
    }
}

std::unique_ptr<ITensorHandle> RefWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                      DataLayout dataLayout,
                                                                      const bool isMemoryManaged) const
{
    // For Ref it is okay to make the TensorHandle memory managed as it can also store a pointer
    // to unmanaged memory. This also ensures memory alignment.
    IgnoreUnused(dataLayout);

    if (isMemoryManaged)
    {
        return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
    }
    else
    {
        return std::make_unique<RefTensorHandle>(tensorInfo);
    }
}

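// Creates the reference workload for the requested layer type: the generic QueueDescriptor is
// downcast to its concrete type and used to construct the matching Ref*Workload. Unsupported
// layer types return nullptr.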
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateWorkload(LayerType type,
                                                              const QueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    switch(type)
    {
        case LayerType::Activation :
        {
            auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefActivationWorkload>(*activationQueueDescriptor, info);
        }
        case LayerType::Addition :
        {
            auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
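            // Arithmetic workloads are templated on the compute type: Signed32 inputs use the
            // int32_t specialisation, every other supported type takes the float path.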
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefAdditionWorkload<int32_t>>(*additionQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefAdditionWorkload<float>>(*additionQueueDescriptor, info);
            }
        }
        case LayerType::ArgMinMax :
        {
            auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
            return std::make_unique<RefArgMinMaxWorkload>(*argMinMaxQueueDescriptor, info);
        }
        case LayerType::BatchMatMul:
        {
            auto batchMatMulQueueDescriptor = PolymorphicDowncast<const BatchMatMulQueueDescriptor*>(&descriptor);
            return std::make_unique<RefBatchMatMulWorkload>(*batchMatMulQueueDescriptor, info);
        }
        case LayerType::BatchNormalization :
        {
            auto batchNormQueueDescriptor = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefBatchNormalizationWorkload>(*batchNormQueueDescriptor, info);
        }
        case LayerType::BatchToSpaceNd :
        {
            auto batchToSpaceNdQueueDescriptor
                = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
            return std::make_unique<RefBatchToSpaceNdWorkload>(*batchToSpaceNdQueueDescriptor, info);
        }
        case LayerType::BroadcastTo:
        {
            auto broadcastToQueueDescriptor = PolymorphicDowncast<const BroadcastToQueueDescriptor*>(&descriptor);
            return std::make_unique<RefBroadcastToWorkload>(*broadcastToQueueDescriptor, info);
        }
        case LayerType::Cast :
        {
            auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
            return std::make_unique<RefCastWorkload>(*castQueueDescriptor, info);
        }
        case LayerType::ChannelShuffle :
        {
            auto channelShuffleQueueDescriptor
                = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
            return std::make_unique<RefChannelShuffleWorkload>(*channelShuffleQueueDescriptor, info);
        }
        case LayerType::Comparison :
        {
            auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
            return std::make_unique<RefComparisonWorkload>(*comparisonQueueDescriptor, info);
        }
        case LayerType::Concat :
        {
            auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
            return std::make_unique<RefConcatWorkload>(*concatQueueDescriptor, info);
        }
        case LayerType::Constant :
        {
            auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
            return std::make_unique<RefConstantWorkload>(*constantQueueDescriptor, info);
        }
        case LayerType::ConvertFp16ToFp32:
        {
            auto convertFp16ToFp32QueueDescriptor
                = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor, info);
        }
        case LayerType::ConvertFp32ToFp16:
        {
            auto convertFp32ToFp16QueueDescriptor
                = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvertFp32ToFp16Workload>(*convertFp32ToFp16QueueDescriptor, info);
        }
        case LayerType::Convolution2d:
        {
            auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvolution2dWorkload>(*convolution2dQueueDescriptor, info);
        }
        case LayerType::Convolution3d:
        {
            auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvolution3dWorkload>(*convolution3dQueueDescriptor, info);
        }
        case LayerType::Debug:
        {
            auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
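            // Debug workloads are selected per data type; Float32 and QAsymmU8 fall through to
            // the MakeWorkload helper at the end of this case.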
            if (IsBFloat16(info))
            {
                return std::make_unique<RefDebugBFloat16Workload>(*debugQueueDescriptor, info);
            }
            if (IsFloat16(info))
            {
                return std::make_unique<RefDebugFloat16Workload>(*debugQueueDescriptor, info);
            }
            if (IsQSymmS16(info))
            {
                return std::make_unique<RefDebugQSymmS16Workload>(*debugQueueDescriptor, info);
            }
            if (IsQSymmS8(info))
            {
                return std::make_unique<RefDebugQSymmS8Workload>(*debugQueueDescriptor, info);
            }
            if (IsQAsymmU8(info))
            {
                return std::make_unique<RefDebugQAsymmU8Workload>(*debugQueueDescriptor, info);
            }
            if (IsQAsymmS8(info))
            {
                return std::make_unique<RefDebugQAsymmS8Workload>(*debugQueueDescriptor, info);
            }
            if (IsSigned32(info))
            {
                return std::make_unique<RefDebugSigned32Workload>(*debugQueueDescriptor, info);
            }
            return MakeWorkload<RefDebugFloat32Workload, RefDebugQAsymmU8Workload>(*debugQueueDescriptor, info);
        }
        case LayerType::DepthToSpace:
        {
            auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
            return std::make_unique<RefDepthToSpaceWorkload>(*depthToSpaceQueueDescriptor, info);
        }
        case LayerType::DepthwiseConvolution2d:
        {
            auto depthwiseConvolution2DQueueDescriptor
                = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefDepthwiseConvolution2dWorkload>(*depthwiseConvolution2DQueueDescriptor, info);
        }
        case LayerType::Dequantize:
        {
            auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefDequantizeWorkload>(*dequantizeQueueDescriptor, info);
        }
        case LayerType::DetectionPostProcess:
        {
            auto detectionPostProcessQueueDescriptor
                = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
            return std::make_unique<RefDetectionPostProcessWorkload>(*detectionPostProcessQueueDescriptor, info);
        }
        case LayerType::Division:
        {
            auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefDivisionWorkload<int32_t>>(*divisionQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefDivisionWorkload<float>>(*divisionQueueDescriptor, info);
            }
        }
        case LayerType::ElementwiseBinary:
        {
            auto elementwiseBinaryQueueDescriptor
                = PolymorphicDowncast<const ElementwiseBinaryQueueDescriptor*>(&descriptor);
            return std::make_unique<RefElementwiseBinaryWorkload>(*elementwiseBinaryQueueDescriptor, info);
        }
        case LayerType::ElementwiseUnary:
        {
            auto elementwiseUnaryQueueDescriptor
                = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
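            // LogicalNot has a dedicated workload; every other unary operation goes through the
            // generic elementwise unary workload.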
            if ((*elementwiseUnaryQueueDescriptor).m_Parameters.m_Operation == UnaryOperation::LogicalNot)
            {
                return std::make_unique<RefLogicalUnaryWorkload>(*elementwiseUnaryQueueDescriptor, info);
            }
            return std::make_unique<RefElementwiseUnaryWorkload>(*elementwiseUnaryQueueDescriptor, info);
        }
        case LayerType::FakeQuantization:
        {
            auto fakeQuantizationQueueDescriptor
                = PolymorphicDowncast<const FakeQuantizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefFakeQuantizationFloat32Workload>(*fakeQuantizationQueueDescriptor, info);
        }
        case LayerType::Fill:
        {
            auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
            return std::make_unique<RefFillWorkload>(*fillQueueDescriptor, info);
        }
        case LayerType::Floor:
        {
            auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
            if (IsQuantizedType(info.m_InputTensorInfos[0].GetDataType()))
            {
                return nullptr;
            }
            else
            {
                return std::make_unique<RefFloorWorkload>(*floorQueueDescriptor, info);
            }
        }
        case LayerType::FullyConnected:
        {
            auto fullyConnectedQueueDescriptor
                = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
            return std::make_unique<RefFullyConnectedWorkload>(*fullyConnectedQueueDescriptor, info);
        }
        case LayerType::Gather:
        {
            auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
            return std::make_unique<RefGatherWorkload>(*gatherQueueDescriptor, info);
        }
        case LayerType::GatherNd:
        {
            auto gatherNdQueueDescriptor = PolymorphicDowncast<const GatherNdQueueDescriptor*>(&descriptor);
            return std::make_unique<RefGatherNdWorkload>(*gatherNdQueueDescriptor, info);
        }
        case LayerType::Input:
        {
            auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
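            // Input (and Output below) are realised as plain memory copies; validate the
            // descriptor before constructing the copy workload.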
            if (info.m_InputTensorInfos.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Input cannot be zero length");
            }
            if (info.m_OutputTensorInfos.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Output cannot be zero length");
            }
            if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateInput: "
                                               "data input and output differ in byte count.");
            }
            return std::make_unique<CopyMemGenericWorkload>(*inputQueueDescriptor, info);
        }
        case LayerType::InstanceNormalization:
        {
            auto instanceNormalizationQueueDescriptor
                = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefInstanceNormalizationWorkload>(*instanceNormalizationQueueDescriptor, info);
        }
        case LayerType::L2Normalization:
        {
            auto l2NormalizationQueueDescriptor
                = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefL2NormalizationWorkload>(*l2NormalizationQueueDescriptor, info);
        }
        case LayerType::LogicalBinary:
        {
            auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
            return std::make_unique<RefLogicalBinaryWorkload>(*logicalBinaryQueueDescriptor, info);
        }
        case LayerType::LogSoftmax:
        {
            auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
            return std::make_unique<RefLogSoftmaxWorkload>(*logSoftmaxQueueDescriptor, info);
        }
        case LayerType::Lstm:
        {
            auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
            return std::make_unique<RefLstmWorkload>(*lstmQueueDescriptor, info);
        }
        case LayerType::Maximum:
        {
            auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefMaximumWorkload<int32_t>>(*maximumQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefMaximumWorkload<float>>(*maximumQueueDescriptor, info);
            }
        }
        case LayerType::Mean:
        {
            auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
            return std::make_unique<RefMeanWorkload>(*meanQueueDescriptor, info);
        }
        case LayerType::MemCopy:
        {
            auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
            if (descriptor.m_Inputs.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory: CreateMemCopy() expected an input tensor.");
            }
            return std::make_unique<CopyMemGenericWorkload>(*memCopyQueueDescriptor, info);
        }
        case LayerType::MemImport:
        {
            auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
            if (descriptor.m_Inputs.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory: CreateMemImport() expected an input tensor.");
            }
            return std::make_unique<ImportMemGenericWorkload>(*memImportQueueDescriptor, info);
        }
        case LayerType::Minimum:
        {
            auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefMinimumWorkload<int32_t>>(*minimumQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefMinimumWorkload<float>>(*minimumQueueDescriptor, info);
            }
        }
        case LayerType::Multiplication:
        {
            auto multiplicationQueueDescriptor
                = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefMultiplicationWorkload<int32_t>>(*multiplicationQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefMultiplicationWorkload<float>>(*multiplicationQueueDescriptor, info);
            }
        }
        case LayerType::Normalization:
        {
            auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefNormalizationWorkload>(*normalizationQueueDescriptor, info);
        }
        case LayerType::Output:
        {
            auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Input cannot be zero length");
            }
            if (info.m_OutputTensorInfos.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Output cannot be zero length");
            }
            if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: data input and output "
                                               "differ in byte count.");
            }
            return std::make_unique<CopyMemGenericWorkload>(*outputQueueDescriptor, info);
        }
        case LayerType::Pad:
        {
            auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
            return std::make_unique<RefPadWorkload>(*padQueueDescriptor, info);
        }
        case LayerType::Permute:
        {
            auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
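            // Permute (and Transpose below) pick a per-data-type workload; Float16, Float32 and
            // QAsymmU8 are covered by the MakeWorkloadHelper call at the end of this case.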
            if (IsQSymmS16(info))
            {
                return std::make_unique<RefPermuteQSymm16Workload>(*permuteQueueDescriptor, info);
            }
            else if (IsBFloat16(info))
            {
                return std::make_unique<RefPermuteBFloat16Workload>(*permuteQueueDescriptor, info);
            }
            else if (IsQAsymmS8(info))
            {
                return std::make_unique<RefPermuteQAsymmS8Workload>(*permuteQueueDescriptor, info);
            }
            return MakeWorkloadHelper<RefPermuteFloat16Workload, RefPermuteFloat32Workload, RefPermuteQAsymm8Workload,
                                      NullWorkload, NullWorkload, NullWorkload>(*permuteQueueDescriptor, info);
        }
        case LayerType::Pooling2d:
        {
            auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefPooling2dWorkload>(*pooling2dQueueDescriptor, info);
        }
        case LayerType::Pooling3d:
        {
            auto pooling3dQueueDescriptor = PolymorphicDowncast<const Pooling3dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefPooling3dWorkload>(*pooling3dQueueDescriptor, info);
        }
        case LayerType::PreCompiled:
        {
            return nullptr;
        }
        case LayerType::Prelu:
        {
            auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
            return std::make_unique<RefPreluWorkload>(*preluQueueDescriptor, info);
        }
        case LayerType::QLstm:
        {
            auto qlstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
            return std::make_unique<RefQLstmWorkload>(*qlstmQueueDescriptor, info);
        }
        case LayerType::Quantize:
        {
            auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefQuantizeWorkload>(*quantizeQueueDescriptor, info);
        }
        case LayerType::Rank:
        {
            auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
            return std::make_unique<RefRankWorkload>(*rankQueueDescriptor, info);
        }
        case LayerType::Reduce:
        {
            auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
            return std::make_unique<RefReduceWorkload>(*reduceQueueDescriptor, info);
        }
        case LayerType::Reshape:
        {
            auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefReshapeWorkload>(*reshapeQueueDescriptor, info);
        }
        case LayerType::Resize:
        {
            auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefResizeWorkload>(*resizeQueueDescriptor, info);
        }
        case LayerType::ReverseV2:
        {
            auto reverseV2QueueDescriptor = PolymorphicDowncast<const ReverseV2QueueDescriptor*>(&descriptor);
            return std::make_unique<RefReverseV2Workload>(*reverseV2QueueDescriptor, info);
        }
        case LayerType::Shape:
        {
            auto shapeQueueDescriptor = PolymorphicDowncast<const ShapeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefShapeWorkload>(*shapeQueueDescriptor, info);
        }
        case LayerType::Slice:
        {
            auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSliceWorkload>(*sliceQueueDescriptor, info);
        }
        case LayerType::Softmax:
        {
            auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSoftmaxWorkload>(*softmaxQueueDescriptor, info);
        }
        case LayerType::SpaceToBatchNd:
        {
            auto spaceToBatchNdQueueDescriptor
                = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSpaceToBatchNdWorkload>(*spaceToBatchNdQueueDescriptor, info);
        }
        case LayerType::SpaceToDepth:
        {
            auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSpaceToDepthWorkload>(*spaceToDepthQueueDescriptor, info);
        }
        case LayerType::Splitter:
        {
            auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSplitterWorkload>(*splitterQueueDescriptor, info);
        }
        case LayerType::Stack:
        {
            auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
            return std::make_unique<RefStackWorkload>(*stackQueueDescriptor, info);
        }
        case LayerType::StridedSlice:
        {
            auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
            return std::make_unique<RefStridedSliceWorkload>(*stridedSliceQueueDescriptor, info);
        }
        case LayerType::Subtraction:
        {
            auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefSubtractionWorkload<int32_t>>(*subtractionQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefSubtractionWorkload<float>>(*subtractionQueueDescriptor, info);
            }
        }
        case LayerType::Tile:
        {
            auto tileQueueDescriptor = PolymorphicDowncast<const TileQueueDescriptor*>(&descriptor);
            return std::make_unique<RefTileWorkload>(*tileQueueDescriptor, info);
        }
        case LayerType::Transpose:
        {
            auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
            if (IsQSymmS16(info))
            {
                return std::make_unique<RefTransposeQSymm16Workload>(*transposeQueueDescriptor, info);
            }
            else if (IsBFloat16(info))
            {
                return std::make_unique<RefTransposeBFloat16Workload>(*transposeQueueDescriptor, info);
            }
            else if (IsQAsymmS8(info))
            {
                return std::make_unique<RefTransposeQAsymmS8Workload>(*transposeQueueDescriptor, info);
            }
            return MakeWorkloadHelper<RefTransposeFloat16Workload, RefTransposeFloat32Workload,
                                      RefTransposeQAsymm8Workload, NullWorkload, NullWorkload, NullWorkload>
                                      (*transposeQueueDescriptor, info);
        }
        case LayerType::TransposeConvolution2d:
        {
            auto transposeConvolution2dQueueDescriptor
                = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefTransposeConvolution2dWorkload>(*transposeConvolution2dQueueDescriptor, info);
        }
        case LayerType::UnidirectionalSequenceLstm:
        {
            auto unidirectionalSequenceLstmQueueDescriptor
                = PolymorphicDowncast<const UnidirectionalSequenceLstmQueueDescriptor*>(&descriptor);
            return std::make_unique<RefUnidirectionalSequenceLstmWorkload>(*unidirectionalSequenceLstmQueueDescriptor,
                                                                           info);
        }
        default:
            return nullptr;
    }
}

} // namespace armnn