//
// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <Layer.hpp>

#include <backendsCommon/MemImportWorkload.hpp>
#include <backendsCommon/MakeWorkloadHelper.hpp>
#include <armnn/backends/MemCopyWorkload.hpp>
#include <armnn/backends/TensorHandle.hpp>

#include "RefWorkloadFactory.hpp"
#include "RefBackendId.hpp"
#include "RefTensorHandle.hpp"
#include "workloads/RefWorkloads.hpp"

namespace armnn
{

namespace
{
static const BackendId s_Id{RefBackendId()};
}
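
// Instantiates the Float32 or Uint8 specialisation of a reference workload,
// chosen from the tensor data type recorded in 'info'. The remaining slots
// (Float16, Signed32, Boolean and BFloat16, in MakeWorkloadHelper's parameter
// order) resolve to NullWorkload, so those data types produce no workload.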
template <typename F32Workload, typename U8Workload, typename QueueDescriptorType>
std::unique_ptr<IWorkload> RefWorkloadFactory::MakeWorkload(const QueueDescriptorType& descriptor,
                                                            const WorkloadInfo& info) const
{
    return MakeWorkloadHelper<NullWorkload, F32Workload, U8Workload, NullWorkload, NullWorkload, NullWorkload>
           (descriptor, info);
}

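// Returns true if any input or output tensor in 'info' has data type 'ArmnnType'.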
template <DataType ArmnnType>
bool IsDataType(const WorkloadInfo& info)
{
    auto checkType = [](const TensorInfo& tensorInfo) {return tensorInfo.GetDataType() == ArmnnType;};
    auto it = std::find_if(std::begin(info.m_InputTensorInfos), std::end(info.m_InputTensorInfos), checkType);
    if (it != std::end(info.m_InputTensorInfos))
    {
        return true;
    }
    it = std::find_if(std::begin(info.m_OutputTensorInfos), std::end(info.m_OutputTensorInfos), checkType);
    if (it != std::end(info.m_OutputTensorInfos))
    {
        return true;
    }
    return false;
}
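
// Convenience wrappers over IsDataType<>, used by CreateWorkload() below to
// pick the per-type specialisation of a workload.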
bool IsSigned64(const WorkloadInfo& info)
{
    return IsDataType<DataType::Signed64>(info);
}
bool IsSigned32(const WorkloadInfo& info)
{
    return IsDataType<DataType::Signed32>(info);
}
bool IsBFloat16(const WorkloadInfo& info)
{
    return IsDataType<DataType::BFloat16>(info);
}
bool IsFloat16(const WorkloadInfo& info)
{
    return IsDataType<DataType::Float16>(info);
}
bool IsQSymmS16(const WorkloadInfo& info)
{
    return IsDataType<DataType::QSymmS16>(info);
}
bool IsQSymmS8(const WorkloadInfo& info)
{
    return IsDataType<DataType::QSymmS8>(info);
}
bool IsQAsymmS8(const WorkloadInfo& info)
{
    return IsDataType<DataType::QAsymmS8>(info);
}
bool IsQAsymmU8(const WorkloadInfo& info)
{
    return IsDataType<DataType::QAsymmU8>(info);
}

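// A factory can either share an externally owned RefMemoryManager or, by
// default, create and own one of its own.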
RefWorkloadFactory::RefWorkloadFactory(const std::shared_ptr<RefMemoryManager>& memoryManager)
    : m_MemoryManager(memoryManager)
{
}

RefWorkloadFactory::RefWorkloadFactory()
    : m_MemoryManager(new RefMemoryManager())
{
}

const BackendId& RefWorkloadFactory::GetBackendId() const
{
    return s_Id;
}

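// Both overloads delegate to the shared IWorkloadFactory implementation,
// identifying this backend via the reference backend id.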
bool RefWorkloadFactory::IsLayerSupported(const Layer& layer,
                                          Optional<DataType> dataType,
                                          std::string& outReasonIfUnsupported)
{
    return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported);
}

bool RefWorkloadFactory::IsLayerSupported(const IConnectableLayer& layer,
                                          Optional<DataType> dataType,
                                          std::string& outReasonIfUnsupported,
                                          const ModelOptions& modelOptions)
{
    return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported, modelOptions);
}

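// Creates a RefTensorHandle, backed by the factory's memory manager when
// isMemoryManaged is true and by unmanaged (externally provided) memory otherwise.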
std::unique_ptr<ITensorHandle> RefWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                      const bool isMemoryManaged) const
{
    if (isMemoryManaged)
    {
        return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
    }
    else
    {
        return std::make_unique<RefTensorHandle>(tensorInfo);
    }
}

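// DataLayout overload: reference tensor handles are plain CPU buffers, so the
// layout does not change how the handle is created; the parameter is accepted
// to satisfy the IWorkloadFactory interface.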
std::unique_ptr<ITensorHandle> RefWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                      DataLayout dataLayout,
                                                                      const bool isMemoryManaged) const
{
    // For Ref it is okay to make the TensorHandle memory managed as it can also store a pointer
    // to unmanaged memory. This also ensures memory alignment.
    IgnoreUnused(dataLayout);

    if (isMemoryManaged)
    {
        return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
    }
    else
    {
        return std::make_unique<RefTensorHandle>(tensorInfo);
    }
}

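// Single dispatch point for every layer type: downcasts the generic
// QueueDescriptor to the layer-specific descriptor and instantiates the
// matching reference workload. Returns nullptr for layer types the reference
// backend does not implement (e.g. PreCompiled).
//
// A minimal usage sketch (hypothetical: assumes 'descriptor' and 'info' have
// already been populated for an Activation layer):
//
//     RefWorkloadFactory factory;
//     std::unique_ptr<IWorkload> workload =
//         factory.CreateWorkload(LayerType::Activation, descriptor, info);
//     if (workload)
//     {
//         workload->Execute();
//     }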
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateWorkload(LayerType type,
                                                              const QueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    switch(type)
    {
        case LayerType::Activation :
        {
            auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefActivationWorkload>(*activationQueueDescriptor, info);
        }
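        // Binary arithmetic workloads are templated on the compute type:
        // Signed32 tensors take the int32_t specialisation, while every other
        // type (including quantized ones, which the reference implementation
        // decodes to float) takes the float path. The same pattern repeats for
        // Division, Maximum, Minimum, Multiplication and Subtraction below.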
        case LayerType::Addition :
        {
            auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefAdditionWorkload<int32_t>>(*additionQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefAdditionWorkload<float>>(*additionQueueDescriptor, info);
            }
        }
        case LayerType::ArgMinMax :
        {
            auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
            return std::make_unique<RefArgMinMaxWorkload>(*argMinMaxQueueDescriptor, info);
        }
        case LayerType::BatchMatMul:
        {
            auto batchMatMulQueueDescriptor = PolymorphicDowncast<const BatchMatMulQueueDescriptor*>(&descriptor);
            return std::make_unique<RefBatchMatMulWorkload>(*batchMatMulQueueDescriptor, info);
        }
        case LayerType::BatchNormalization :
        {
            auto batchNormQueueDescriptor = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefBatchNormalizationWorkload>(*batchNormQueueDescriptor, info);
        }
        case LayerType::BatchToSpaceNd :
        {
            auto batchToSpaceNdQueueDescriptor
                = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
            return std::make_unique<RefBatchToSpaceNdWorkload>(*batchToSpaceNdQueueDescriptor, info);
        }
        case LayerType::BroadcastTo:
        {
            auto broadcastToQueueDescriptor = PolymorphicDowncast<const BroadcastToQueueDescriptor*>(&descriptor);
            return std::make_unique<RefBroadcastToWorkload>(*broadcastToQueueDescriptor, info);
        }
        case LayerType::Cast :
        {
            auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
            return std::make_unique<RefCastWorkload>(*castQueueDescriptor, info);
        }
        case LayerType::ChannelShuffle :
        {
            auto channelShuffleQueueDescriptor
                = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
            return std::make_unique<RefChannelShuffleWorkload>(*channelShuffleQueueDescriptor, info);
        }
        case LayerType::Comparison :
        {
            auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
            return std::make_unique<RefComparisonWorkload>(*comparisonQueueDescriptor, info);
        }
        case LayerType::Concat :
        {
            auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
            return std::make_unique<RefConcatWorkload>(*concatQueueDescriptor, info);
        }
        case LayerType::Constant :
        {
            auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
            return std::make_unique<RefConstantWorkload>(*constantQueueDescriptor, info);
        }
        case LayerType::ConvertFp16ToFp32:
        {
            auto convertFp16ToFp32QueueDescriptor
                = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor, info);
        }
        case LayerType::ConvertFp32ToFp16:
        {
            auto convertFp32ToFp16QueueDescriptor
                = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvertFp32ToFp16Workload>(*convertFp32ToFp16QueueDescriptor, info);
        }
        case LayerType::Convolution2d:
        {
            auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvolution2dWorkload>(*convolution2dQueueDescriptor, info);
        }
        case LayerType::Convolution3d:
        {
            auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefConvolution3dWorkload>(*convolution3dQueueDescriptor, info);
        }
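        // Debug workloads are templated on the tensor element type, so the
        // concrete type is probed from the WorkloadInfo; the final MakeWorkload
        // call covers Float32 (QAsymmU8 having been matched earlier).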
        case LayerType::Debug:
        {
            auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
            if (IsBFloat16(info))
            {
                return std::make_unique<RefDebugBFloat16Workload>(*debugQueueDescriptor, info);
            }
            if (IsFloat16(info))
            {
                return std::make_unique<RefDebugFloat16Workload>(*debugQueueDescriptor, info);
            }
            if (IsQSymmS16(info))
            {
                return std::make_unique<RefDebugQSymmS16Workload>(*debugQueueDescriptor, info);
            }
            if (IsQSymmS8(info))
            {
                return std::make_unique<RefDebugQSymmS8Workload>(*debugQueueDescriptor, info);
            }
            if (IsQAsymmU8(info))
            {
                return std::make_unique<RefDebugQAsymmU8Workload>(*debugQueueDescriptor, info);
            }
            if (IsQAsymmS8(info))
            {
                return std::make_unique<RefDebugQAsymmS8Workload>(*debugQueueDescriptor, info);
            }
            if (IsSigned32(info))
            {
                return std::make_unique<RefDebugSigned32Workload>(*debugQueueDescriptor, info);
            }
            if (IsSigned64(info))
            {
                return std::make_unique<RefDebugSigned64Workload>(*debugQueueDescriptor, info);
            }
            return MakeWorkload<RefDebugFloat32Workload, RefDebugQAsymmU8Workload>(*debugQueueDescriptor, info);
        }
        case LayerType::DepthToSpace:
        {
            auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
            return std::make_unique<RefDepthToSpaceWorkload>(*depthToSpaceQueueDescriptor, info);
        }
        case LayerType::DepthwiseConvolution2d:
        {
            auto depthwiseConvolution2DQueueDescriptor
                = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefDepthwiseConvolution2dWorkload>(*depthwiseConvolution2DQueueDescriptor, info);
        }
        case LayerType::Dequantize:
        {
            auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefDequantizeWorkload>(*dequantizeQueueDescriptor, info);
        }
        case LayerType::DetectionPostProcess:
        {
            auto detectionPostProcessQueueDescriptor
                = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
            return std::make_unique<RefDetectionPostProcessWorkload>(*detectionPostProcessQueueDescriptor, info);
        }
        case LayerType::Division:
        {
            auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefDivisionWorkload<int32_t>>(*divisionQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefDivisionWorkload<float>>(*divisionQueueDescriptor, info);
            }
        }
        case LayerType::ElementwiseBinary:
        {
            auto elementwiseBinaryQueueDescriptor
                = PolymorphicDowncast<const ElementwiseBinaryQueueDescriptor*>(&descriptor);
            return std::make_unique<RefElementwiseBinaryWorkload>(*elementwiseBinaryQueueDescriptor, info);
        }
        case LayerType::ElementwiseUnary:
        {
            auto elementwiseUnaryQueueDescriptor
                = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
            if ((*elementwiseUnaryQueueDescriptor).m_Parameters.m_Operation == UnaryOperation::LogicalNot)
            {
                return std::make_unique<RefLogicalUnaryWorkload>(*elementwiseUnaryQueueDescriptor, info);
            }
            return std::make_unique<RefElementwiseUnaryWorkload>(*elementwiseUnaryQueueDescriptor, info);
        }
        case LayerType::FakeQuantization:
        {
            auto fakeQuantizationQueueDescriptor
                = PolymorphicDowncast<const FakeQuantizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefFakeQuantizationFloat32Workload>(*fakeQuantizationQueueDescriptor, info);
        }
        case LayerType::Fill:
        {
            auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
            return std::make_unique<RefFillWorkload>(*fillQueueDescriptor, info);
        }
        case LayerType::Floor:
        {
            auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
            if(IsQuantizedType(info.m_InputTensorInfos[0].GetDataType()))
            {
                return nullptr;
            }
            else
            {
                return std::make_unique<RefFloorWorkload>(*floorQueueDescriptor, info);
            }
        }
        case LayerType::FullyConnected:
        {
            auto fullyConnectedQueueDescriptor
                = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
            return std::make_unique<RefFullyConnectedWorkload>(*fullyConnectedQueueDescriptor, info);
        }
        case LayerType::Gather:
        {
            auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
            return std::make_unique<RefGatherWorkload>(*gatherQueueDescriptor, info);
        }
        case LayerType::GatherNd:
        {
            auto gatherNdQueueDescriptor = PolymorphicDowncast<const GatherNdQueueDescriptor*>(&descriptor);
            return std::make_unique<RefGatherNdWorkload>(*gatherNdQueueDescriptor, info);
        }
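        // Input and Output layers do no computation: after validating that the
        // tensors are present and equal in byte count, they are lowered to a
        // generic memory-copy workload.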
        case LayerType::Input:
        {
            auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Input cannot be zero length");
            }
            if (info.m_OutputTensorInfos.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateInput: Output cannot be zero length");
            }
            if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateInput: "
                                               "data input and output differ in byte count.");
            }
            return std::make_unique<CopyMemGenericWorkload>(*inputQueueDescriptor, info);
        }
        case LayerType::InstanceNormalization:
        {
            auto instanceNormalizationQueueDescriptor
                = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefInstanceNormalizationWorkload>(*instanceNormalizationQueueDescriptor, info);
        }
        case LayerType::L2Normalization:
        {
            auto l2NormalizationQueueDescriptor
                = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefL2NormalizationWorkload>(*l2NormalizationQueueDescriptor, info);
        }
        case LayerType::LogicalBinary:
        {
            auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
            return std::make_unique<RefLogicalBinaryWorkload>(*logicalBinaryQueueDescriptor, info);
        }
        case LayerType::LogSoftmax:
        {
            auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
            return std::make_unique<RefLogSoftmaxWorkload>(*logSoftmaxQueueDescriptor, info);
        }
        case LayerType::Lstm:
        {
            auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
            return std::make_unique<RefLstmWorkload>(*lstmQueueDescriptor, info);
        }
        case LayerType::Maximum:
        {
            auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefMaximumWorkload<int32_t>>(*maximumQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefMaximumWorkload<float>>(*maximumQueueDescriptor, info);
            }
        }
        case LayerType::Mean:
        {
            auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
            return std::make_unique<RefMeanWorkload>(*meanQueueDescriptor, info);
        }
        case LayerType::MemCopy:
        {
            auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
            if (descriptor.m_Inputs.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory: CreateMemCopy() expected an input tensor.");
            }
            return std::make_unique<CopyMemGenericWorkload>(*memCopyQueueDescriptor, info);
        }
        case LayerType::MemImport:
        {
            auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
            if (descriptor.m_Inputs.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory: CreateMemImport() expected an input tensor.");
            }
            return std::make_unique<ImportMemGenericWorkload>(*memImportQueueDescriptor, info);
        }
        case LayerType::Minimum:
        {
            auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefMinimumWorkload<int32_t>>(*minimumQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefMinimumWorkload<float>>(*minimumQueueDescriptor, info);
            }
        }
        case LayerType::Multiplication:
        {
            auto multiplicationQueueDescriptor
                = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefMultiplicationWorkload<int32_t>>(*multiplicationQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefMultiplicationWorkload<float>>(*multiplicationQueueDescriptor, info);
            }
        }
        case LayerType::Normalization:
        {
            auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
            return std::make_unique<RefNormalizationWorkload>(*normalizationQueueDescriptor, info);
        }
        case LayerType::Output:
        {
            auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Input cannot be zero length");
            }
            if (info.m_OutputTensorInfos.empty())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: Output cannot be zero length");
            }
            if (info.m_InputTensorInfos[0].GetNumBytes() != info.m_OutputTensorInfos[0].GetNumBytes())
            {
                throw InvalidArgumentException("RefWorkloadFactory::CreateOutput: data input and output "
                                               "differ in byte count.");
            }
            return std::make_unique<CopyMemGenericWorkload>(*outputQueueDescriptor, info);
        }
        case LayerType::Pad:
        {
            auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
            return std::make_unique<RefPadWorkload>(*padQueueDescriptor, info);
        }
        case LayerType::Permute:
        {
            auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
            if (IsQSymmS16(info))
            {
                return std::make_unique<RefPermuteQSymm16Workload>(*permuteQueueDescriptor, info);
            }
            else if (IsBFloat16(info))
            {
                return std::make_unique<RefPermuteBFloat16Workload>(*permuteQueueDescriptor, info);
            }
            else if (IsQAsymmS8(info))
            {
                return std::make_unique<RefPermuteQAsymmS8Workload>(*permuteQueueDescriptor, info);
            }
            return MakeWorkloadHelper<RefPermuteFloat16Workload, RefPermuteFloat32Workload, RefPermuteQAsymm8Workload,
                                      NullWorkload, NullWorkload, NullWorkload>(*permuteQueueDescriptor, info);
        }
        case LayerType::Pooling2d:
        {
            auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefPooling2dWorkload>(*pooling2dQueueDescriptor, info);
        }
        case LayerType::Pooling3d:
        {
            auto pooling3dQueueDescriptor = PolymorphicDowncast<const Pooling3dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefPooling3dWorkload>(*pooling3dQueueDescriptor, info);
        }
        case LayerType::PreCompiled:
        {
            return nullptr;
        }
        case LayerType::Prelu:
        {
            auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
            return std::make_unique<RefPreluWorkload>(*preluQueueDescriptor, info);
        }
        case LayerType::QLstm:
        {
            auto qlstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
            return std::make_unique<RefQLstmWorkload>(*qlstmQueueDescriptor, info);
        }
        case LayerType::Quantize:
        {
            auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefQuantizeWorkload>(*quantizeQueueDescriptor, info);
        }
        case LayerType::Rank:
        {
            auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
            return std::make_unique<RefRankWorkload>(*rankQueueDescriptor, info);
        }
        case LayerType::Reduce:
        {
            auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
            return std::make_unique<RefReduceWorkload>(*reduceQueueDescriptor, info);
        }
        case LayerType::Reshape:
        {
            auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefReshapeWorkload>(*reshapeQueueDescriptor, info);
        }
        case LayerType::Resize:
        {
            auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefResizeWorkload>(*resizeQueueDescriptor, info);
        }
        case LayerType::ReverseV2:
        {
            auto reverseV2QueueDescriptor = PolymorphicDowncast<const ReverseV2QueueDescriptor*>(&descriptor);
            return std::make_unique<RefReverseV2Workload>(*reverseV2QueueDescriptor, info);
        }
        case LayerType::Shape:
        {
            auto shapeQueueDescriptor = PolymorphicDowncast<const ShapeQueueDescriptor*>(&descriptor);
            return std::make_unique<RefShapeWorkload>(*shapeQueueDescriptor, info);
        }
        case LayerType::Slice:
        {
            auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSliceWorkload>(*sliceQueueDescriptor, info);
        }
        case LayerType::Softmax:
        {
            auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSoftmaxWorkload>(*softmaxQueueDescriptor, info);
        }
        case LayerType::SpaceToBatchNd:
        {
            auto spaceToBatchNdQueueDescriptor
                = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSpaceToBatchNdWorkload>(*spaceToBatchNdQueueDescriptor, info);
        }
        case LayerType::SpaceToDepth:
        {
            auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSpaceToDepthWorkload>(*spaceToDepthQueueDescriptor, info);
        }
        case LayerType::Splitter:
        {
            auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
            return std::make_unique<RefSplitterWorkload>(*splitterQueueDescriptor, info);
        }
        case LayerType::Stack:
        {
            auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
            return std::make_unique<RefStackWorkload>(*stackQueueDescriptor, info);
        }
        case LayerType::StridedSlice:
        {
            auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
            return std::make_unique<RefStridedSliceWorkload>(*stridedSliceQueueDescriptor, info);
        }
        case LayerType::Subtraction:
        {
            auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
            if (info.m_InputTensorInfos[0].GetDataType() == armnn::DataType::Signed32)
            {
                return std::make_unique<RefSubtractionWorkload<int32_t>>(*subtractionQueueDescriptor, info);
            }
            else
            {
                return std::make_unique<RefSubtractionWorkload<float>>(*subtractionQueueDescriptor, info);
            }
        }
        case LayerType::Tile:
        {
            auto tileQueueDescriptor = PolymorphicDowncast<const TileQueueDescriptor*>(&descriptor);
            return std::make_unique<RefTileWorkload>(*tileQueueDescriptor, info);
        }
        case LayerType::Transpose:
        {
            auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
            if (IsQSymmS16(info))
            {
                return std::make_unique<RefTransposeQSymm16Workload>(*transposeQueueDescriptor, info);
            }
            else if (IsBFloat16(info))
            {
                return std::make_unique<RefTransposeBFloat16Workload>(*transposeQueueDescriptor, info);
            }
            else if (IsQAsymmS8(info))
            {
                return std::make_unique<RefTransposeQAsymmS8Workload>(*transposeQueueDescriptor, info);
            }
            return MakeWorkloadHelper<RefTransposeFloat16Workload, RefTransposeFloat32Workload,
                                      RefTransposeQAsymm8Workload, NullWorkload, NullWorkload, NullWorkload>
                   (*transposeQueueDescriptor, info);
        }
        case LayerType::TransposeConvolution2d:
        {
            auto transposeConvolution2dQueueDescriptor
                = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
            return std::make_unique<RefTransposeConvolution2dWorkload>(*transposeConvolution2dQueueDescriptor, info);
        }
        case LayerType::UnidirectionalSequenceLstm:
        {
            auto unidirectionalSequenceLstmQueueDescriptor
                = PolymorphicDowncast<const UnidirectionalSequenceLstmQueueDescriptor*>(&descriptor);
            return std::make_unique<RefUnidirectionalSequenceLstmWorkload>(*unidirectionalSequenceLstmQueueDescriptor,
                                                                           info);
        }
        default:
            return nullptr;
    }
}

} // namespace armnn