//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <Layer.hpp>
#include <armnn/backends/MemCopyWorkload.hpp>
#include <backendsCommon/MemImportWorkload.hpp>
#include <backendsCommon/MakeWorkloadHelper.hpp>
#include <armnn/backends/TensorHandle.hpp>
#include "RefWorkloadFactory.hpp"
#include "RefBackendId.hpp"
#include "workloads/RefWorkloads.hpp"
#include "RefTensorHandle.hpp"


namespace armnn
{

namespace
{
static const BackendId s_Id{RefBackendId()};
}
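
// Builds a workload whose concrete type is chosen from the tensor data type recorded in the
// WorkloadInfo: Float32 selects F32Workload, QAsymmU8 selects U8Workload, and every other
// data type resolves to NullWorkload, i.e. no workload is created.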
template <typename F32Workload, typename U8Workload, typename QueueDescriptorType>
std::unique_ptr<IWorkload> RefWorkloadFactory::MakeWorkload(const QueueDescriptorType& descriptor,
                                                            const WorkloadInfo& info) const
{
    return MakeWorkloadHelper<NullWorkload, F32Workload, U8Workload, NullWorkload, NullWorkload, NullWorkload>
           (descriptor, info);
}

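// Returns true when any input or output tensor described by the WorkloadInfo has the data type
// given as the template argument.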
template <DataType ArmnnType>
bool IsDataType(const WorkloadInfo& info)
{
    auto checkType = [](const TensorInfo& tensorInfo) {return tensorInfo.GetDataType() == ArmnnType;};
    auto it = std::find_if(std::begin(info.m_InputTensorInfos), std::end(info.m_InputTensorInfos), checkType);
    if (it != std::end(info.m_InputTensorInfos))
    {
        return true;
    }
    it = std::find_if(std::begin(info.m_OutputTensorInfos), std::end(info.m_OutputTensorInfos), checkType);
    if (it != std::end(info.m_OutputTensorInfos))
    {
        return true;
    }
    return false;
}

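// Convenience predicates over IsDataType<>, used below to pick type-specific workloads.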
bool IsSigned32(const WorkloadInfo& info)
{
    return IsDataType<DataType::Signed32>(info);
}

bool IsBFloat16(const WorkloadInfo& info)
{
    return IsDataType<DataType::BFloat16>(info);
}

bool IsFloat16(const WorkloadInfo& info)
{
    return IsDataType<DataType::Float16>(info);
}

bool IsQSymmS16(const WorkloadInfo& info)
{
    return IsDataType<DataType::QSymmS16>(info);
}

bool IsQSymmS8(const WorkloadInfo& info)
{
    return IsDataType<DataType::QSymmS8>(info);
}

bool IsQAsymmS8(const WorkloadInfo& info)
{
    return IsDataType<DataType::QAsymmS8>(info);
}

bool IsQAsymmU8(const WorkloadInfo& info)
{
    return IsDataType<DataType::QAsymmU8>(info);
}

RefWorkloadFactory::RefWorkloadFactory(const std::shared_ptr<RefMemoryManager>& memoryManager)
    : m_MemoryManager(memoryManager)
{
}

RefWorkloadFactory::RefWorkloadFactory()
    : m_MemoryManager(new RefMemoryManager())
{
}
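
// Illustrative usage only (tensorInfo is a placeholder): the factory can share one memory
// manager across the tensor handles it creates.
//     auto memoryManager = std::make_shared<RefMemoryManager>();
//     RefWorkloadFactory factory(memoryManager);
//     std::unique_ptr<ITensorHandle> handle = factory.CreateTensorHandle(tensorInfo, true);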

const BackendId& RefWorkloadFactory::GetBackendId() const
{
    return s_Id;
}

bool RefWorkloadFactory::IsLayerSupported(const Layer& layer,
                                          Optional<DataType> dataType,
                                          std::string& outReasonIfUnsupported)
{
    return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported);
}

bool RefWorkloadFactory::IsLayerSupported(const IConnectableLayer& layer,
                                          Optional<DataType> dataType,
                                          std::string& outReasonIfUnsupported,
                                          const ModelOptions& modelOptions)
{
    return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported, modelOptions);
}

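// Tensor handles are either backed by the factory's RefMemoryManager (memory managed) or
// created unmanaged with MemorySource::Malloc so that their memory can be imported.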
std::unique_ptr<ITensorHandle> RefWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                      const bool isMemoryManaged) const
{
    if (isMemoryManaged)
    {
        return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
    }
    else
    {
        return std::make_unique<RefTensorHandle>(tensorInfo, static_cast<unsigned int>(MemorySource::Malloc));
    }
}

std::unique_ptr<ITensorHandle> RefWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                      DataLayout dataLayout,
                                                                      const bool isMemoryManaged) const
{
    // For Ref it is okay to make the TensorHandle memory managed as it can also store a pointer
    // to unmanaged memory. This also ensures memory alignment.
    IgnoreUnused(dataLayout);

    if (isMemoryManaged)
    {
        return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
    }
    else
    {
        return std::make_unique<RefTensorHandle>(tensorInfo, static_cast<unsigned int>(MemorySource::Malloc));
    }
}

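// Unified creation entry point: downcasts the generic QueueDescriptor to the layer-specific
// descriptor for the given LayerType and returns the matching Ref workload, or nullptr when
// the layer type is not handled by this backend.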
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateWorkload(LayerType type,
                                                              const QueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    switch(type)
    {
        case LayerType::Activation :
        {
            auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefActivationWorkload>(*activationQueueDescriptor, info);
        }
        case LayerType::Addition :
        {
            auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);

            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefAdditionWorkload<int32_t>>(*additionQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefAdditionWorkload<float>>(*additionQueueDescriptor, info);
            }
        }
        case LayerType::ArgMinMax :
        {
            auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
            return std::make_unique<RefArgMinMaxWorkload>(*argMinMaxQueueDescriptor, info);
        }
        case LayerType::BatchNormalization :
        {
            auto batchNormQueueDescriptor = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefBatchNormalizationWorkload>(*batchNormQueueDescriptor, info);
        }
        case LayerType::BatchToSpaceNd :
        {
            auto batchToSpaceNdQueueDescriptor
                    = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
            return std::make_unique<RefBatchToSpaceNdWorkload>(*batchToSpaceNdQueueDescriptor, info);
        }
        case LayerType::Cast :
        {
            auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
            return std::make_unique<RefCastWorkload>(*castQueueDescriptor, info);
        }
        case LayerType::ChannelShuffle :
        {
            auto channelShuffleQueueDescriptor
                    = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
            return std::make_unique<RefChannelShuffleWorkload>(*channelShuffleQueueDescriptor, info);
        }
        case LayerType::Comparison :
        {
            auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
            return std::make_unique<RefComparisonWorkload>(*comparisonQueueDescriptor, info);
        }
        case LayerType::Concat :
        {
            auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
            return std::make_unique<RefConcatWorkload>(*concatQueueDescriptor, info);
        }
        case LayerType::Constant :
        {
            auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
            return std::make_unique<RefConstantWorkload>(*constantQueueDescriptor, info);
        }
        case LayerType::ConvertBf16ToFp32 :
        {
            auto convertBf16ToFp32QueueDescriptor
                    = PolymorphicDowncast<const ConvertBf16ToFp32QueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvertBf16ToFp32Workload>(*convertBf16ToFp32QueueDescriptor, info);
        }
        case LayerType::ConvertFp16ToFp32:
        {
            auto convertFp16ToFp32QueueDescriptor
                    = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor, info);
        }
        case LayerType::ConvertFp32ToBf16:
        {
            auto convertFp32ToBf16QueueDescriptor
                    = PolymorphicDowncast<const ConvertFp32ToBf16QueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvertFp32ToBf16Workload>(*convertFp32ToBf16QueueDescriptor, info);
        }
        case LayerType::ConvertFp32ToFp16:
        {
            auto convertFp32ToFp16QueueDescriptor
                    = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvertFp32ToFp16Workload>(*convertFp32ToFp16QueueDescriptor, info);
        }
        case LayerType::Convolution2d:
        {
            auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvolution2dWorkload>(*convolution2dQueueDescriptor, info);
        }
        case LayerType::Convolution3d:
        {
            auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvolution3dWorkload>(*convolution3dQueueDescriptor, info);
        }
        case LayerType::Debug:
        {
            auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
            if (IsBFloat16(info))
            {
                return std::make_unique<RefDebugBFloat16Workload>(*debugQueueDescriptor, info);
            }
            if (IsFloat16(info))
            {
                return std::make_unique<RefDebugFloat16Workload>(*debugQueueDescriptor, info);
            }
            if (IsQSymmS16(info))
            {
                return std::make_unique<RefDebugQSymmS16Workload>(*debugQueueDescriptor, info);
            }
            if (IsQSymmS8(info))
            {
                return std::make_unique<RefDebugQSymmS8Workload>(*debugQueueDescriptor, info);
            }
            if (IsQAsymmU8(info))
            {
                return std::make_unique<RefDebugQAsymmU8Workload>(*debugQueueDescriptor, info);
            }
            if (IsQAsymmS8(info))
            {
                return std::make_unique<RefDebugQAsymmS8Workload>(*debugQueueDescriptor, info);
            }
            if (IsSigned32(info))
            {
                return std::make_unique<RefDebugSigned32Workload>(*debugQueueDescriptor, info);
            }

            return MakeWorkload<RefDebugFloat32Workload, RefDebugQAsymmU8Workload>(*debugQueueDescriptor, info);
        }
        case LayerType::DepthToSpace:
        {
            auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
            return std::make_unique<RefDepthToSpaceWorkload>(*depthToSpaceQueueDescriptor, info);
        }
        case LayerType::DepthwiseConvolution2d:
        {
            auto depthwiseConvolution2DQueueDescriptor
                    = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefDepthwiseConvolution2dWorkload>(*depthwiseConvolution2DQueueDescriptor, info);
        }
        case LayerType::Dequantize:
        {
            auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefDequantizeWorkload>(*dequantizeQueueDescriptor, info);
        }
        case LayerType::DetectionPostProcess:
        {
            auto detectionPostProcessQueueDescriptor
                    = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
            return std::make_unique<RefDetectionPostProcessWorkload>(*detectionPostProcessQueueDescriptor, info);
        }
        case LayerType::Division:
        {
            auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefDivisionWorkload<int32_t>>(*divisionQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefDivisionWorkload<float>>(*divisionQueueDescriptor, info);
            }
        }
        case LayerType::ElementwiseUnary:
        {
            auto elementwiseUnaryQueueDescriptor
                    = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
            if ((*elementwiseUnaryQueueDescriptor).m_Parameters.m_Operation == UnaryOperation::LogicalNot)
            {
                return std::make_unique<RefLogicalUnaryWorkload>(*elementwiseUnaryQueueDescriptor, info);
            }
            return std::make_unique<RefElementwiseUnaryWorkload>(*elementwiseUnaryQueueDescriptor, info);
        }
        case LayerType::FakeQuantization:
        {
            auto fakeQuantizationQueueDescriptor
                    = PolymorphicDowncast<const FakeQuantizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefFakeQuantizationFloat32Workload>(*fakeQuantizationQueueDescriptor, info);
        }
        case LayerType::Fill:
        {
            auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
            return std::make_unique<RefFillWorkload>(*fillQueueDescriptor, info);
        }
        case LayerType::Floor:
        {
            auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
            if(IsQuantizedType(info.m_InputTensorInfos[0].GetDataType()))
            {
                return nullptr;
            }
            else
            {
                return std::make_unique<RefFloorWorkload>(*floorQueueDescriptor, info);
            }
        }
        case LayerType::FullyConnected:
        {
            auto fullyConnectedQueueDescriptor
                    = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
            return std::make_unique<RefFullyConnectedWorkload>(*fullyConnectedQueueDescriptor, info);
        }
        case LayerType::Gather:
        {
            auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
            return std::make_unique<RefGatherWorkload>(*gatherQueueDescriptor, info);
        }
        case LayerType::Input:
        {
            auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Input cannot be zero length");
            }
            if (info.m_OutputTensorInfos.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Output cannot be zero length");
            }

            if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateInput: "
                                               "data input and output differ in byte count.");
            }

            return std::make_unique<CopyMemGenericWorkload>(*inputQueueDescriptor, info);
        }
        case LayerType::InstanceNormalization:
        {
            auto instanceNormalizationQueueDescriptor
                    = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefInstanceNormalizationWorkload>(*instanceNormalizationQueueDescriptor, info);
        }
        case LayerType::L2Normalization:
        {
            auto l2NormalizationQueueDescriptor
                    = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefL2NormalizationWorkload>(*l2NormalizationQueueDescriptor, info);
        }
        case LayerType::LogicalBinary:
        {
            auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
            return std::make_unique<RefLogicalBinaryWorkload>(*logicalBinaryQueueDescriptor, info);
        }
        case LayerType::LogSoftmax:
        {
            auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
            return std::make_unique<RefLogSoftmaxWorkload>(*logSoftmaxQueueDescriptor, info);
        }
        case LayerType::Lstm:
        {
            auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
            return std::make_unique<RefLstmWorkload>(*lstmQueueDescriptor, info);
        }
        case LayerType::Maximum:
        {
            auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefMaximumWorkload<int32_t>>(*maximumQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefMaximumWorkload<float>>(*maximumQueueDescriptor, info);
            }
        }
        case LayerType::Mean:
        {
            auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
            return std::make_unique<RefMeanWorkload>(*meanQueueDescriptor, info);
        }
        case LayerType::MemCopy:
        {
            auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
            if (descriptor.m_Inputs.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory: CreateMemCopy() expected an input tensor.");
            }
            return std::make_unique<CopyMemGenericWorkload>(*memCopyQueueDescriptor, info);
        }
        case LayerType::MemImport:
        {
            auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
            if (descriptor.m_Inputs.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory: CreateMemImport() expected an input tensor.");
            }
            return std::make_unique<ImportMemGenericWorkload>(*memImportQueueDescriptor, info);
        }
        case LayerType::Minimum:
        {
            auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefMinimumWorkload<int32_t>>(*minimumQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefMinimumWorkload<float>>(*minimumQueueDescriptor, info);
            }
        }
        case LayerType::Multiplication:
        {
            auto multiplicationQueueDescriptor
                    = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefMultiplicationWorkload<int32_t>>(*multiplicationQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefMultiplicationWorkload<float>>(*multiplicationQueueDescriptor, info);
            }
        }
        case LayerType::Normalization:
        {
            auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefNormalizationWorkload>(*normalizationQueueDescriptor, info);
        }
        case LayerType::Output:
        {
            auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Input cannot be zero length");
            }
            if (info.m_OutputTensorInfos.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Output cannot be zero length");
            }
            if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: data input and output "
                                               "differ in byte count.");
            }

            return std::make_unique<CopyMemGenericWorkload>(*outputQueueDescriptor, info);
        }
        case LayerType::Pad:
        {
            auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
            return std::make_unique<RefPadWorkload>(*padQueueDescriptor, info);
        }
        case LayerType::Permute:
        {
            auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
            if (IsQSymmS16(info))
            {
                return std::make_unique<RefPermuteQSymm16Workload>(*permuteQueueDescriptor, info);
            }
            else if (IsBFloat16(info))
            {
                return std::make_unique<RefPermuteBFloat16Workload>(*permuteQueueDescriptor, info);
            }
            else if (IsQAsymmS8(info))
            {
                return std::make_unique<RefPermuteQAsymmS8Workload>(*permuteQueueDescriptor, info);
            }
            return MakeWorkloadHelper<RefPermuteFloat16Workload, RefPermuteFloat32Workload, RefPermuteQAsymm8Workload,
                                      NullWorkload, NullWorkload, NullWorkload>(*permuteQueueDescriptor, info);
        }
        case LayerType::Pooling2d:
        {
            auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefPooling2dWorkload>(*pooling2dQueueDescriptor, info);
        }
        case LayerType::Pooling3d:
        {
            auto pooling3dQueueDescriptor = PolymorphicDowncast<const Pooling3dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefPooling3dWorkload>(*pooling3dQueueDescriptor, info);
        }
        case LayerType::PreCompiled:
        {
            return nullptr;
        }
        case LayerType::Prelu:
        {
            auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
            return std::make_unique<RefPreluWorkload>(*preluQueueDescriptor, info);
        }
        case LayerType::QLstm:
        {
            auto qlstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
            return std::make_unique<RefQLstmWorkload>(*qlstmQueueDescriptor, info);
        }
        case LayerType::Quantize:
        {
            auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefQuantizeWorkload>(*quantizeQueueDescriptor, info);
        }
        case LayerType::Rank:
        {
            auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
            return std::make_unique<RefRankWorkload>(*rankQueueDescriptor, info);
        }
        case LayerType::Reduce:
        {
            auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
            return std::make_unique<RefReduceWorkload>(*reduceQueueDescriptor, info);
        }
        case LayerType::Reshape:
        {
            auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefReshapeWorkload>(*reshapeQueueDescriptor, info);
        }
        case LayerType::Resize:
        {
            auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefResizeWorkload>(*resizeQueueDescriptor, info);
        }
        case LayerType::Shape:
        {
            auto shapeQueueDescriptor = PolymorphicDowncast<const ShapeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefShapeWorkload>(*shapeQueueDescriptor, info);
        }
        case LayerType::Slice:
        {
            auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSliceWorkload>(*sliceQueueDescriptor, info);
        }
        case LayerType::Softmax:
        {
            auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSoftmaxWorkload>(*softmaxQueueDescriptor, info);
        }
        case LayerType::SpaceToBatchNd:
        {
            auto spaceToBatchNdQueueDescriptor
                    = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSpaceToBatchNdWorkload>(*spaceToBatchNdQueueDescriptor, info);
        }
        case LayerType::SpaceToDepth:
        {
            auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSpaceToDepthWorkload>(*spaceToDepthQueueDescriptor, info);
        }
        case LayerType::Splitter:
        {
            auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSplitterWorkload>(*splitterQueueDescriptor, info);
        }
        case LayerType::Stack:
        {
            auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
            return std::make_unique<RefStackWorkload>(*stackQueueDescriptor, info);
        }
        case LayerType::StridedSlice:
        {
            auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
            return std::make_unique<RefStridedSliceWorkload>(*stridedSliceQueueDescriptor, info);
        }
        case LayerType::Subtraction:
        {
            auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefSubtractionWorkload<int32_t>>(*subtractionQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefSubtractionWorkload<float>>(*subtractionQueueDescriptor, info);
            }
        }
        case LayerType::Transpose:
        {
            auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
            if (IsQSymmS16(info))
            {
                return std::make_unique<RefTransposeQSymm16Workload>(*transposeQueueDescriptor, info);
            }
            else if (IsBFloat16(info))
            {
                return std::make_unique<RefTransposeBFloat16Workload>(*transposeQueueDescriptor, info);
            }
            else if (IsQAsymmS8(info))
            {
                return std::make_unique<RefTransposeQAsymmS8Workload>(*transposeQueueDescriptor, info);
            }
            return MakeWorkloadHelper<RefTransposeFloat16Workload, RefTransposeFloat32Workload,
                                      RefTransposeQAsymm8Workload, NullWorkload, NullWorkload, NullWorkload>
                                      (*transposeQueueDescriptor, info);
        }
        case LayerType::TransposeConvolution2d:
        {
            auto transposeConvolution2dQueueDescriptor
                    = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefTransposeConvolution2dWorkload>(*transposeConvolution2dQueueDescriptor, info);
        }
        case LayerType::UnidirectionalSequenceLstm:
        {
            auto unidirectionalSequenceLstmQueueDescriptor
                    = PolymorphicDowncast<const UnidirectionalSequenceLstmQueueDescriptor*>(&descriptor);
            return std::make_unique<RefUnidirectionalSequenceLstmWorkload>(*unidirectionalSequenceLstmQueueDescriptor,
                                                                           info);
        }
        default:
            return nullptr;
    }
}

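// Per-layer factory methods; each mirrors the corresponding case handled by CreateWorkload above.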
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
                                                                const WorkloadInfo& info) const
{
    return std::make_unique<RefActivationWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
    {
        return std::make_unique<RefAdditionWorkload<int32_t>>(descriptor, info);
    }
    else
    {
        return std::make_unique<RefAdditionWorkload<float>>(descriptor, info);
    }
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateArgMinMax(const ArgMinMaxQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
{
    return std::make_unique<RefArgMinMaxWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateBatchNormalization(
    const BatchNormalizationQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<RefBatchNormalizationWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
                                                                    const WorkloadInfo& info) const
{
    return std::make_unique<RefBatchToSpaceNdWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateCast(const CastQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
{
    return std::make_unique<RefCastWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateChannelShuffle(const ChannelShuffleQueueDescriptor& descriptor,
                                                                    const WorkloadInfo& info) const
{
    return std::make_unique<RefChannelShuffleWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& descriptor,
                                                                const WorkloadInfo& info) const
{
    return std::make_unique<RefComparisonWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    return std::make_unique<RefConcatWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    return std::make_unique<RefConstantWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConvertBf16ToFp32(
    const ConvertBf16ToFp32QueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<RefConvertBf16ToFp32Workload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConvertFp16ToFp32(
    const ConvertFp16ToFp32QueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<RefConvertFp16ToFp32Workload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConvertFp32ToBf16(
    const ConvertFp32ToBf16QueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<RefConvertFp32ToBf16Workload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConvertFp32ToFp16(
    const ConvertFp32ToFp16QueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<RefConvertFp32ToFp16Workload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor,
                                                                   const WorkloadInfo& info) const
{
    return std::make_unique<RefConvolution2dWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConvolution3d(const Convolution3dQueueDescriptor& descriptor,
                                                                   const WorkloadInfo& info) const
{
    return std::make_unique<RefConvolution3dWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDebug(const DebugQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    if (IsBFloat16(info))
    {
        return std::make_unique<RefDebugBFloat16Workload>(descriptor, info);
    }
    if (IsFloat16(info))
    {
        return std::make_unique<RefDebugFloat16Workload>(descriptor, info);
    }
    if (IsQSymmS16(info))
    {
        return std::make_unique<RefDebugQSymmS16Workload>(descriptor, info);
    }
    if (IsQSymmS8(info))
    {
        return std::make_unique<RefDebugQSymmS8Workload>(descriptor, info);
    }
    if (IsQAsymmU8(info))
    {
        return std::make_unique<RefDebugQAsymmU8Workload>(descriptor, info);
    }
    if (IsQAsymmS8(info))
    {
        return std::make_unique<RefDebugQAsymmS8Workload>(descriptor, info);
    }
    if (IsSigned32(info))
    {
        return std::make_unique<RefDebugSigned32Workload>(descriptor, info);
    }

    return MakeWorkload<RefDebugFloat32Workload, RefDebugQAsymmU8Workload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDepthToSpace(const DepthToSpaceQueueDescriptor& descriptor,
                                                                  const WorkloadInfo& info) const
{
    return std::make_unique<RefDepthToSpaceWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDepthwiseConvolution2d(
    const DepthwiseConvolution2dQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<RefDepthwiseConvolution2dWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDequantize(const DequantizeQueueDescriptor& descriptor,
                                                                const WorkloadInfo& info) const
{
    return std::make_unique<RefDequantizeWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDetectionPostProcess(
    const DetectionPostProcessQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<RefDetectionPostProcessWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDivision(const DivisionQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
    {
        return std::make_unique<RefDivisionWorkload<int32_t>>(descriptor, info);
    }
    else
    {
        return std::make_unique<RefDivisionWorkload<float>>(descriptor, info);
    }
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
                                                                      const WorkloadInfo& info) const
{
    if (descriptor.m_Parameters.m_Operation == UnaryOperation::LogicalNot)
    {
        return std::make_unique<RefLogicalUnaryWorkload>(descriptor, info);
    }
    return std::make_unique<RefElementwiseUnaryWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor,
                                                                      const WorkloadInfo& info) const
{
    return MakeWorkload<RefFakeQuantizationFloat32Workload, NullWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateFill(const FillQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
{
    return std::make_unique<RefFillWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    if(IsQuantizedType(info.m_InputTensorInfos[0].GetDataType()))
    {
        return nullptr;
    }
    else
    {
        return std::make_unique<RefFloorWorkload>(descriptor, info);
    }
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateFullyConnected(
    const FullyConnectedQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<RefFullyConnectedWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateGather(const GatherQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    return std::make_unique<RefGatherWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateInput(const InputQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    if (info.m_InputTensorInfos.empty())
    {
        throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Input cannot be zero length");
    }
    if (info.m_OutputTensorInfos.empty())
    {
        throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Output cannot be zero length");
    }

    if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
    {
        throw InvalidArgumentException("RefWorkloadFactory::CreateInput: data input and output differ in byte count.");
    }

    return std::make_unique<CopyMemGenericWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateInstanceNormalization(
    const InstanceNormalizationQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<RefInstanceNormalizationWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
                                                                     const WorkloadInfo& info) const
{
    return std::make_unique<RefL2NormalizationWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateLogicalBinary(const LogicalBinaryQueueDescriptor& descriptor,
                                                                   const WorkloadInfo& info) const
{
    return std::make_unique<RefLogicalBinaryWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateLogSoftmax(const LogSoftmaxQueueDescriptor& descriptor,
                                                                const WorkloadInfo& info) const
{
    return std::make_unique<RefLogSoftmaxWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateLstm(const LstmQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
{
    return std::make_unique<RefLstmWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateMaximum(const MaximumQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
    {
        return std::make_unique<RefMaximumWorkload<int32_t>>(descriptor, info);
    }
    else
    {
        return std::make_unique<RefMaximumWorkload<float>>(descriptor, info);
    }
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateMean(const MeanQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
{
    return std::make_unique<RefMeanWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    if (descriptor.m_Inputs.empty())
    {
        throw InvalidArgumentException("RefWorkloadFactory: CreateMemCopy() expected an input tensor.");
    }
    return std::make_unique<CopyMemGenericWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateMemImport(const MemImportQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
{
    if (descriptor.m_Inputs.empty())
    {
        throw InvalidArgumentException("RefWorkloadFactory: CreateMemImport() expected an input tensor.");
    }
    return std::make_unique<ImportMemGenericWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
    {
        return std::make_unique<RefMinimumWorkload<int32_t>>(descriptor, info);
    }
    else
    {
        return std::make_unique<RefMinimumWorkload<float>>(descriptor, info);
    }
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateMultiplication(const MultiplicationQueueDescriptor& descriptor,
                                                                    const WorkloadInfo& info) const
{
    if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
    {
        return std::make_unique<RefMultiplicationWorkload<int32_t>>(descriptor, info);
    }
    else
    {
        return std::make_unique<RefMultiplicationWorkload<float>>(descriptor, info);
    }
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& descriptor,
                                                                   const WorkloadInfo& info) const
{
    return std::make_unique<RefNormalizationWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateOutput(const OutputQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    if (info.m_InputTensorInfos.empty())
    {
        throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Input cannot be zero length");
    }
    if (info.m_OutputTensorInfos.empty())
    {
        throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Output cannot be zero length");
    }
    if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
    {
        throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: data input and output differ in byte count.");
    }

    return std::make_unique<CopyMemGenericWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePad(const PadQueueDescriptor& descriptor,
                                                         const WorkloadInfo& info) const
{
    return std::make_unique<RefPadWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    if (IsQSymmS16(info))
    {
        return std::make_unique<RefPermuteQSymm16Workload>(descriptor, info);
    }
    else if (IsBFloat16(info))
    {
        return std::make_unique<RefPermuteBFloat16Workload>(descriptor, info);
    }
    else if (IsQAsymmS8(info))
    {
        return std::make_unique<RefPermuteQAsymmS8Workload>(descriptor, info);
    }
    return MakeWorkloadHelper<RefPermuteFloat16Workload, RefPermuteFloat32Workload, RefPermuteQAsymm8Workload,
                              NullWorkload, NullWorkload, NullWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
{
    return std::make_unique<RefPooling2dWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePooling3d(const Pooling3dQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
{
    return std::make_unique<RefPooling3dWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& /*descriptor*/,
                                                                 const WorkloadInfo& /*info*/) const
{
    return nullptr;
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePrelu(const PreluQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    return std::make_unique<RefPreluWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateQLstm(const QLstmQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    return std::make_unique<RefQLstmWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    return std::make_unique<RefQuantizeWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateRank(const RankQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
{
    return std::make_unique<RefRankWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateReduce(const ReduceQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    return std::make_unique<RefReduceWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    return std::make_unique<RefReshapeWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateResize(const ResizeQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    return std::make_unique<RefResizeWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateShape(const ShapeQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    return std::make_unique<RefShapeWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSlice(const SliceQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    return std::make_unique<RefSliceWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    return std::make_unique<RefSoftmaxWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
                                                                    const WorkloadInfo& info) const
{
    return std::make_unique<RefSpaceToBatchNdWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& descriptor,
                                                                  const WorkloadInfo& info) const
{
    return std::make_unique<RefSpaceToDepthWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    return std::make_unique<RefSplitterWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateStack(const StackQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    return std::make_unique<RefStackWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
                                                                  const WorkloadInfo& info) const
{
    return std::make_unique<RefStridedSliceWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
                                                                 const WorkloadInfo& info) const
{
    if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
    {
        return std::make_unique<RefSubtractionWorkload<int32_t>>(descriptor, info);
    }
    else
    {
        return std::make_unique<RefSubtractionWorkload<float>>(descriptor, info);
    }
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateTranspose(const TransposeQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
{
    if (IsQSymmS16(info))
    {
        return std::make_unique<RefTransposeQSymm16Workload>(descriptor, info);
    }
    else if (IsBFloat16(info))
    {
        return std::make_unique<RefTransposeBFloat16Workload>(descriptor, info);
    }
    else if (IsQAsymmS8(info))
    {
        return std::make_unique<RefTransposeQAsymmS8Workload>(descriptor, info);
    }
    return MakeWorkloadHelper<RefTransposeFloat16Workload, RefTransposeFloat32Workload, RefTransposeQAsymm8Workload,
                              NullWorkload, NullWorkload, NullWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateTransposeConvolution2d(
    const TransposeConvolution2dQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<RefTransposeConvolution2dWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> RefWorkloadFactory::CreateUnidirectionalSequenceLstm(
    const UnidirectionalSequenceLstmQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<RefUnidirectionalSequenceLstmWorkload>(descriptor, info);
}

} // namespace armnn