//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "ClWorkloadFactory.hpp"
#include "ClBackendId.hpp"
#include "ClBackendModelContext.hpp"
#include "ClContextDeserializer.hpp"
#include "ClContextSerializer.hpp"

#include <Layer.hpp>

#include <armnn/Exceptions.hpp>
#include <armnn/Logging.hpp>
#include <armnn/Utils.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#include <backendsCommon/MakeWorkloadHelper.hpp>
#include <armnn/backends/MemCopyWorkload.hpp>
#include <backendsCommon/MemImportWorkload.hpp>
#include <armnn/backends/TensorHandle.hpp>

#include <cl/ClTensorHandle.hpp>
#include <cl/workloads/ClWorkloads.hpp>
#include <cl/workloads/ClWorkloadUtils.hpp>

#include <arm_compute/core/CL/CLKernelLibrary.h>
#include <arm_compute/runtime/CL/CLBufferAllocator.h>
#include <arm_compute/runtime/CL/CLScheduler.h>

#include <armnnUtils/Filesystem.hpp>
#include <fstream>

#include <sys/stat.h>

namespace armnn
{

namespace
{
static const BackendId s_Id{ClBackendId()};
}

bool ClWorkloadFactory::IsLayerSupported(const Layer& layer,
                                         Optional<DataType> dataType,
                                         std::string& outReasonIfUnsupported)
{
    return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported);
}

bool ClWorkloadFactory::IsLayerSupported(const IConnectableLayer& layer,
                                         Optional<DataType> dataType,
                                         std::string& outReasonIfUnsupported,
                                         const ModelOptions& modelOptions)
{
    return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported, modelOptions);
}

const BackendId& ClWorkloadFactory::GetBackendId() const
{
    return s_Id;
}

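// Called once all workloads for a network have been created. If the user asked for the
// compiled network to be cached (via ClBackendModelContext), the OpenCL programs built
// into m_CLCompileContext are serialized either to the supplied file descriptor or to
// the file path given in the model options.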
void ClWorkloadFactory::AfterWorkloadsCreated()
{
    if (m_ModelContextPtr)
    {
        auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
        if (modelOptions && modelOptions->SaveCachedNetwork())
        {
            ClContextSerializer serializer;
            serializer.Serialize(m_CLCompileContext);
            auto cachedFd = modelOptions->GetCachedFileDescriptor();
            if (cachedFd != -1)
            {
                std::vector<uint8_t> compiledContextData;
                std::stringstream stream;
                bool serialized = serializer.SaveSerializedToStream(stream);
                if (serialized)
                {
                    std::string const serializedString{stream.str()};
                    std::copy(serializedString.begin(),
                              serializedString.end(),
                              std::back_inserter(compiledContextData));
                    auto success = write(cachedFd, compiledContextData.data(), compiledContextData.size());
                    if (success == -1)
                    {
                        ARMNN_LOG(info) << "ClWorkloadFactory: Could not cache the compiled context!";
                    }
                }
            }

            // Save map to a filepath provided in ModelOptions
            auto filePath = modelOptions->GetCachedNetworkFilePath();
            if (filePath != "" && fs::exists(filePath) && fs::is_regular_file(filePath))
            {
                // Serialize ClContext to the file specified
                std::ofstream file(filePath, std::ios::out | std::ios::binary);
                serializer.SaveSerializedToStream(file);
            }
        }
    }
}

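// Helpers that construct a workload and translate any cl::Error thrown by the Compute
// Library during construction into an armnn exception carrying the source location.
// The two-type overload picks FloatWorkload or Uint8Workload based on the tensor data
// type recorded in the WorkloadInfo.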
template <typename FloatWorkload, typename Uint8Workload, typename QueueDescriptorType, typename... Args>
std::unique_ptr<IWorkload> ClWorkloadFactory::MakeWorkload(const QueueDescriptorType& descriptor,
                                                           const WorkloadInfo& info,
                                                           Args&&... args)
{
    try
    {
        return MakeWorkloadHelper<FloatWorkload, Uint8Workload>(descriptor, info, std::forward<Args>(args)...);
    }
    catch (const cl::Error& clError)
    {
        throw WrapClError(clError, CHECK_LOCATION());
    }
}

template <typename Workload, typename QueueDescriptorType, typename... Args>
std::unique_ptr<IWorkload> ClWorkloadFactory::MakeWorkload(const QueueDescriptorType& descriptor,
                                                           const WorkloadInfo& info,
                                                           Args&&... args)
{
    try
    {
        return std::make_unique<Workload>(descriptor, info, std::forward<Args>(args)...);
    }
    catch (const cl::Error& clError)
    {
        throw WrapClError(clError, CHECK_LOCATION());
    }
}

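// Builds m_CLCompileContext from the default OpenCL device and context and then, when a
// cached network is being loaded rather than saved, restores previously compiled OpenCL
// programs from either the cached file descriptor or the cached network file path so
// that kernels do not have to be recompiled.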
void ClWorkloadFactory::InitializeCLCompileContext()
{
    // Initialize our m_CLCompileContext using default device and context
    auto context = arm_compute::CLKernelLibrary::get().context();
    auto device = arm_compute::CLKernelLibrary::get().get_device();
    m_CLCompileContext = arm_compute::CLCompileContext(context, device);

    if (m_ModelContextPtr)
    {
        // Load saved programs if the user has set a filepath
        auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
        if (modelOptions && !(modelOptions->SaveCachedNetwork()))
        {
            ClContextDeserializer deserializer;
            auto cachedFd = modelOptions->GetCachedFileDescriptor();
            if (cachedFd != -1)
            {
                struct stat statBuffer;
                if (fstat(cachedFd, &statBuffer) == 0)
                {
                    long dataSize = static_cast<long>(statBuffer.st_size);
                    if (dataSize > 0)
                    {
                        auto offset = lseek(cachedFd, 0, SEEK_CUR);
                        if (offset == 0)
                        {
                            std::vector<uint8_t> compiledContextData(static_cast<unsigned int>(dataSize));
                            auto success = pread(cachedFd, compiledContextData.data(), compiledContextData.size(), 0);
                            if (success != -1)
                            {
                                deserializer.DeserializeFromBinary(m_CLCompileContext,
                                                                   context,
                                                                   device,
                                                                   compiledContextData);
                            }
                        }
                    }
                }
            }

            auto filePath = modelOptions->GetCachedNetworkFilePath();
            if (filePath != "" && fs::exists(filePath) && fs::is_regular_file(filePath))
            {
                // Deserialize binary file and load into m_CLCompileContext
                deserializer.Deserialize(m_CLCompileContext, context, device, filePath);
            }
        }
    }
}

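// Both constructors initialize the CL compile context up front so that any cached
// OpenCL programs are already loaded by the time the first workload is created.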
ClWorkloadFactory::ClWorkloadFactory(const std::shared_ptr<ClMemoryManager>& memoryManager)
    : m_MemoryManager(memoryManager), m_ModelContextPtr(IBackendInternal::IBackendSpecificModelContextPtr{})
{
    InitializeCLCompileContext();
}

ClWorkloadFactory::ClWorkloadFactory(const std::shared_ptr<ClMemoryManager>& memoryManager,
                                     const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr)
    : m_MemoryManager(memoryManager), m_ModelContextPtr(modelContextPtr)
{
    InitializeCLCompileContext();
}

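// Tensor handle creation. The IsMemoryManaged flag is currently ignored for the CL
// backend: handles are always registered with the memory manager's inter-layer memory
// group so their allocations can be planned across layers.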
std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                     const bool IsMemoryManaged) const
{
    IgnoreUnused(IsMemoryManaged);
    std::unique_ptr<ClTensorHandle> tensorHandle = std::make_unique<ClTensorHandle>(tensorInfo);
    tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());

    return tensorHandle;
}

std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                     DataLayout dataLayout,
                                                                     const bool IsMemoryManaged) const
{
    IgnoreUnused(IsMemoryManaged);
    std::unique_ptr<ClTensorHandle> tensorHandle = std::make_unique<ClTensorHandle>(tensorInfo, dataLayout);
    tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());

    return tensorHandle;
}

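// Creates a view into a parent tensor. The Compute Library indexes tensor coordinates
// in reverse order relative to armnn, so the sub-tensor origin is reversed before being
// handed to arm_compute. Returns nullptr when the requested sub-tensor would be invalid
// for the parent shape.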
std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateSubTensorHandle(ITensorHandle& parent,
                                                                        TensorShape const& subTensorShape,
                                                                        unsigned int const* subTensorOrigin) const
{
    arm_compute::Coordinates coords;
    arm_compute::TensorShape shape = armcomputetensorutils::BuildArmComputeTensorShape(subTensorShape);

    coords.set_num_dimensions(subTensorShape.GetNumDimensions());
    for (unsigned int i = 0; i < subTensorShape.GetNumDimensions(); i++)
    {
        // Arm compute indexes tensor coords in reverse order.
        unsigned int revertedIndex = subTensorShape.GetNumDimensions() - i - 1;
        coords.set(i, armnn::numeric_cast<int>(subTensorOrigin[revertedIndex]));
    }

    const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape());
    if (!::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, parentShape, coords, shape))
    {
        return nullptr;
    }

    return std::make_unique<ClSubTensorHandle>(
        PolymorphicDowncast<IClTensorHandle*>(&parent), shape, coords);
}

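// Single entry point for workload creation: downcasts the generic QueueDescriptor to
// the concrete descriptor for the given LayerType and builds the matching CL workload.
// Unsupported layer types return nullptr.
//
// A minimal usage sketch (assuming a ClMemoryManager constructed the way the CL backend
// does, wrapping an arm_compute::CLBufferAllocator, and a filled-in descriptor and
// WorkloadInfo; the variable names below are illustrative only):
//
//     auto memoryManager = std::make_shared<ClMemoryManager>(
//         std::make_unique<arm_compute::CLBufferAllocator>());
//     ClWorkloadFactory factory(memoryManager);
//     auto workload = factory.CreateWorkload(LayerType::Activation, descriptor, info);
//     if (workload) { workload->Execute(); }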
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateWorkload(LayerType type,
                                                             const QueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    switch(type)
    {
        case LayerType::Activation :
        {
            auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClActivationWorkload>(*activationQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Addition :
        {
            auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClAdditionWorkload>(*additionQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::ArgMinMax :
        {
            auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
            return std::make_unique<ClArgMinMaxWorkload>(*argMinMaxQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::BatchNormalization :
        {
            auto batchNormalizationQueueDescriptor
                    = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClBatchNormalizationFloatWorkload, NullWorkload>
                    (*batchNormalizationQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::BatchToSpaceNd :
        {
            auto batchToSpaceNdQueueDescriptor
                    = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClBatchToSpaceNdWorkload>(*batchToSpaceNdQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Cast :
        {
            auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClCastWorkload>(*castQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::ChannelShuffle :
        {
            auto channelShuffleQueueDescriptor
                    = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClChannelShuffleWorkload>(*channelShuffleQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Comparison :
        {
            auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClComparisonWorkload>(*comparisonQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Concat :
        {
            auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClConcatWorkload>(*concatQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Constant :
        {
            auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClConstantWorkload>(*constantQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::ConvertFp16ToFp32 :
        {
            auto convertFp16ToFp32QueueDescriptor
                    = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
            return MakeWorkload<ClConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor,
                                                             info,
                                                             m_CLCompileContext);
        }
        case LayerType::ConvertFp32ToFp16 :
        {
            auto convertFp32ToFp16QueueDescriptor
                    = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
            return MakeWorkload<ClConvertFp32ToFp16Workload>(*convertFp32ToFp16QueueDescriptor,
                                                             info,
                                                             m_CLCompileContext);
        }
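        // The convolution cases below consult the backend model options: when the user
        // enables fast math, the Compute Library is allowed to pick faster convolution
        // algorithms (such as Winograd) that can trade a small amount of numerical
        // precision for speed.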
        case LayerType::Convolution2d :
        {
            auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);

            bool isFastMathEnabled = false;
            if (m_ModelContextPtr)
            {
                auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
                if (modelOptions)
                {
                    isFastMathEnabled = modelOptions->IsFastMathEnabled();
                }
            }
            return MakeWorkload<ClConvolution2dWorkload>(*convolution2dQueueDescriptor,
                                                         info,
                                                         m_MemoryManager->GetIntraLayerManager(),
                                                         m_CLCompileContext,
                                                         isFastMathEnabled);
        }
        case LayerType::Convolution3d :
        {
            auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);

            bool isFastMathEnabled = false;
            if (m_ModelContextPtr)
            {
                auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
                if (modelOptions)
                {
                    isFastMathEnabled = modelOptions->IsFastMathEnabled();
                }
            }
            return MakeWorkload<ClConvolution3dWorkload>(*convolution3dQueueDescriptor,
                                                         info,
                                                         m_MemoryManager->GetIntraLayerManager(),
                                                         m_CLCompileContext,
                                                         isFastMathEnabled);
        }
        case LayerType::Debug :
        {
            auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
            return MakeWorkload<NullWorkload, NullWorkload>(*debugQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::DepthToSpace :
        {
            auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClDepthToSpaceWorkload>(*depthToSpaceQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::DepthwiseConvolution2d :
        {
            auto depthwiseConvolution2dQueueDescriptor
                    = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClDepthwiseConvolutionWorkload>(*depthwiseConvolution2dQueueDescriptor,
                                                                info,
                                                                m_CLCompileContext);
        }
        case LayerType::Dequantize :
        {
            auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClDequantizeWorkload>(*dequantizeQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::DetectionPostProcess :
        {
            auto detectionPostProcessQueueDescriptor
                    = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
            return MakeWorkload<NullWorkload, NullWorkload>(*detectionPostProcessQueueDescriptor,
                                                            info,
                                                            m_CLCompileContext);
        }
        case LayerType::Division :
        {
            auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
            return std::make_unique<ClDivisionWorkload>(*divisionQueueDescriptor, info, m_CLCompileContext);
        }
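        // ElementwiseUnary dispatches again on the unary operation. Abs and Rsqrt have
        // dedicated workloads with their own descriptor types, so the inputs and outputs
        // are repackaged into those descriptors before construction.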
        case LayerType::ElementwiseUnary :
        {
            auto elementwiseUnaryQueueDescriptor
                    = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);

            switch(elementwiseUnaryQueueDescriptor->m_Parameters.m_Operation)
            {
                case UnaryOperation::Abs:
                {
                    AbsQueueDescriptor absQueueDescriptor;
                    absQueueDescriptor.m_Inputs  = elementwiseUnaryQueueDescriptor->m_Inputs;
                    absQueueDescriptor.m_Outputs = elementwiseUnaryQueueDescriptor->m_Outputs;

                    return std::make_unique<ClAbsWorkload>(absQueueDescriptor, info, m_CLCompileContext);
                }
                case UnaryOperation::Exp:
                    return std::make_unique<ClExpWorkload>(*elementwiseUnaryQueueDescriptor, info, m_CLCompileContext);
                case UnaryOperation::Log:
                    return std::make_unique<ClLogWorkload>(*elementwiseUnaryQueueDescriptor, info, m_CLCompileContext);
                case UnaryOperation::LogicalNot:
                    return std::make_unique<ClLogicalNotWorkload>(*elementwiseUnaryQueueDescriptor,
                                                                  info,
                                                                  m_CLCompileContext);
                case UnaryOperation::Neg:
                    return std::make_unique<ClNegWorkload>(*elementwiseUnaryQueueDescriptor, info, m_CLCompileContext);
                case UnaryOperation::Rsqrt:
                {
                    RsqrtQueueDescriptor rsqrtQueueDescriptor;
                    rsqrtQueueDescriptor.m_Inputs  = elementwiseUnaryQueueDescriptor->m_Inputs;
                    rsqrtQueueDescriptor.m_Outputs = elementwiseUnaryQueueDescriptor->m_Outputs;

                    return std::make_unique<ClRsqrtWorkload>(rsqrtQueueDescriptor, info, m_CLCompileContext);
                }
                case UnaryOperation::Sin:
                    return std::make_unique<ClSinWorkload>(*elementwiseUnaryQueueDescriptor, info, m_CLCompileContext);
                default:
                    return nullptr;
            }
        }
        case LayerType::Fill :
        {
            auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
            return std::make_unique<ClFillWorkload>(*fillQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Floor :
        {
            auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClFloorFloatWorkload, NullWorkload>(*floorQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::FullyConnected :
        {
            auto fullyConnectedQueueDescriptor
                    = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClFullyConnectedWorkload>(*fullyConnectedQueueDescriptor,
                                                          info,
                                                          m_MemoryManager->GetIntraLayerManager(),
                                                          m_CLCompileContext);
        }
        case LayerType::Gather :
        {
            auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClGatherWorkload>(*gatherQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::GatherNd :
        {
            auto gatherNdQueueDescriptor = PolymorphicDowncast<const GatherNdQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClGatherNdWorkload>(*gatherNdQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Input :
        {
            auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
            return std::make_unique<CopyMemGenericWorkload>(*inputQueueDescriptor, info);
        }
        case LayerType::InstanceNormalization :
        {
            auto instanceNormalizationQueueDescriptor
                    = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClInstanceNormalizationWorkload>(*instanceNormalizationQueueDescriptor,
                                                                 info,
                                                                 m_CLCompileContext);
        }
        case LayerType::L2Normalization :
        {
            auto l2NormalizationQueueDescriptor
                    = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClL2NormalizationFloatWorkload, NullWorkload>(*l2NormalizationQueueDescriptor,
                                                                              info,
                                                                              m_CLCompileContext);
        }
        case LayerType::LogicalBinary :
        {
            auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);

            switch(logicalBinaryQueueDescriptor->m_Parameters.m_Operation)
            {
                case LogicalBinaryOperation::LogicalAnd:
                    return std::make_unique<ClLogicalAndWorkload>(*logicalBinaryQueueDescriptor,
                                                                  info,
                                                                  m_CLCompileContext);
                case LogicalBinaryOperation::LogicalOr:
                    return std::make_unique<ClLogicalOrWorkload>(*logicalBinaryQueueDescriptor,
                                                                 info,
                                                                 m_CLCompileContext);
                default:
                    return nullptr;
            }
        }
        case LayerType::LogSoftmax :
        {
            auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);

            return MakeWorkload<ClLogSoftmaxWorkload>(*logSoftmaxQueueDescriptor,
                                                      info,
                                                      m_MemoryManager->GetIntraLayerManager(),
                                                      m_CLCompileContext);
        }
        case LayerType::Lstm :
        {
            auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClLstmFloatWorkload, NullWorkload>(*lstmQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Maximum :
        {
            auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClMaximumWorkload>(*maximumQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Mean :
        {
            auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClMeanWorkload>(*meanQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::MemCopy :
        {
            auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
            if (memCopyQueueDescriptor->m_Inputs.empty() || !memCopyQueueDescriptor->m_Inputs[0])
            {
                throw InvalidArgumentException("ClWorkloadFactory: Invalid null input for MemCopy workload");
            }
            return MakeWorkload<CopyMemGenericWorkload>(*memCopyQueueDescriptor, info);
        }
        case LayerType::MemImport :
        {
            auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
            if (memImportQueueDescriptor->m_Inputs.empty() || !memImportQueueDescriptor->m_Inputs[0])
            {
                throw InvalidArgumentException("ClWorkloadFactory: Invalid null input for MemImport workload");
            }
            return std::make_unique<ImportMemGenericWorkload>(*memImportQueueDescriptor, info);
        }
        case LayerType::Minimum :
        {
            auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClMinimumWorkload>(*minimumQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Multiplication :
        {
            auto multiplicationQueueDescriptor = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClMultiplicationWorkload>(*multiplicationQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Normalization :
        {
            auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClNormalizationFloatWorkload, NullWorkload>(*normalizationQueueDescriptor,
                                                                            info,
                                                                            m_CLCompileContext);
        }
        case LayerType::Output :
        {
            auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
            return std::make_unique<CopyMemGenericWorkload>(*outputQueueDescriptor, info);
        }
        case LayerType::Pad :
        {
            auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClPadWorkload>(*padQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Permute :
        {
            auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClPermuteWorkload>(*permuteQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Pooling2d :
        {
            auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClPooling2dWorkload>(*pooling2dQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Pooling3d :
        {
            auto pooling3dQueueDescriptor = PolymorphicDowncast<const Pooling3dQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClPooling3dWorkload>(*pooling3dQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::PreCompiled :
        {
            auto preCompiledQueueDescriptor = PolymorphicDowncast<const PreCompiledQueueDescriptor*>(&descriptor);
            return MakeWorkload<NullWorkload, NullWorkload>(*preCompiledQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Prelu :
        {
            auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClPreluWorkload>(*preluQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::QLstm :
        {
            auto qLstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
            return std::make_unique<ClQLstmWorkload>(*qLstmQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Quantize :
        {
            auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClQuantizeWorkload>(*quantizeQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::QuantizedLstm :
        {
            auto quantizedLstmQueueDescriptor = PolymorphicDowncast<const QuantizedLstmQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClQuantizedLstmWorkload>(*quantizedLstmQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Rank :
        {
            auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
            return std::make_unique<ClRankWorkload>(*rankQueueDescriptor, info);
        }
        case LayerType::Reduce :
        {
            auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
            return std::make_unique<ClReduceWorkload>(*reduceQueueDescriptor, info);
        }
        case LayerType::Reshape :
        {
            auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClReshapeWorkload>(*reshapeQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Resize :
        {
            auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClResizeWorkload>(*resizeQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Slice :
        {
            auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClSliceWorkload>(*sliceQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Softmax :
        {
            auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
            return std::make_unique<ClSoftmaxWorkload>(*softmaxQueueDescriptor,
                                                       info,
                                                       m_MemoryManager->GetIntraLayerManager(),
                                                       m_CLCompileContext);
        }
        case LayerType::SpaceToBatchNd :
        {
            auto spaceToBatchNdQueueDescriptor
                    = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClSpaceToBatchNdWorkload>(*spaceToBatchNdQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::SpaceToDepth :
        {
            auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClSpaceToDepthWorkload>(*spaceToDepthQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Splitter :
        {
            auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClSplitterWorkload>(*splitterQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Stack :
        {
            auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClStackWorkload>(*stackQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::StridedSlice :
        {
            auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClStridedSliceWorkload>(*stridedSliceQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Subtraction :
        {
            auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClSubtractionWorkload>(*subtractionQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Transpose :
        {
            auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClTransposeWorkload>(*transposeQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::TransposeConvolution2d :
        {
            auto transposeConvolution2dQueueDescriptor
                    = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClTransposeConvolution2dWorkload>(*transposeConvolution2dQueueDescriptor,
                                                                  info,
                                                                  m_MemoryManager->GetIntraLayerManager(),
                                                                  m_CLCompileContext);
        }
        case LayerType::UnidirectionalSequenceLstm :
        {
            auto desc = PolymorphicDowncast<const UnidirectionalSequenceLstmQueueDescriptor*>(&descriptor);
            return MakeWorkloadHelper<ClUnidirectionalSequenceLstmFloatWorkload, NullWorkload>(*desc,
                                                                                               info,
                                                                                               m_CLCompileContext);
        }
        default:
            return nullptr;
    }
}

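// The per-layer Create* methods below predate the LayerType-based CreateWorkload entry
// point above and mirror its behaviour one layer at a time; they remain for callers of
// the older per-layer factory interface.
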
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
{
    return MakeWorkload<ClActivationWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    return MakeWorkload<ClAdditionWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateArgMinMax(const ArgMinMaxQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    return std::make_unique<ClArgMinMaxWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateBatchNormalization(
    const BatchNormalizationQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return MakeWorkload<ClBatchNormalizationFloatWorkload, NullWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
                                                                   const WorkloadInfo& info) const
{
    return MakeWorkload<ClBatchToSpaceNdWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateCast(const CastQueueDescriptor& descriptor,
                                                         const WorkloadInfo& info) const
{
    return MakeWorkload<ClCastWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateChannelShuffle(const ChannelShuffleQueueDescriptor& descriptor,
                                                                   const WorkloadInfo& info) const
{
    return MakeWorkload<ClChannelShuffleWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
{
    return MakeWorkload<ClComparisonWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    return MakeWorkload<ClConcatWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    return MakeWorkload<ClConstantWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateConvertFp16ToFp32(
    const ConvertFp16ToFp32QueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return MakeWorkload<ClConvertFp16ToFp32Workload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateConvertFp32ToFp16(
    const ConvertFp32ToFp16QueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return MakeWorkload<ClConvertFp32ToFp16Workload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor,
                                                                  const WorkloadInfo& info) const
{
    bool isFastMathEnabled = false;
    if (m_ModelContextPtr)
    {
        auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
        if (modelOptions)
        {
            isFastMathEnabled = modelOptions->IsFastMathEnabled();
        }
    }
    return MakeWorkload<ClConvolution2dWorkload>(descriptor,
                                                 info,
                                                 m_MemoryManager->GetIntraLayerManager(),
                                                 m_CLCompileContext,
                                                 isFastMathEnabled);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateConvolution3d(const Convolution3dQueueDescriptor& descriptor,
                                                                  const WorkloadInfo& info) const
{
    bool isFastMathEnabled = false;
    if (m_ModelContextPtr)
    {
        auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
        if (modelOptions)
        {
            isFastMathEnabled = modelOptions->IsFastMathEnabled();
        }
    }
    return MakeWorkload<ClConvolution3dWorkload>(descriptor,
                                                 info,
                                                 m_MemoryManager->GetIntraLayerManager(),
                                                 m_CLCompileContext,
                                                 isFastMathEnabled);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDebug(const DebugQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
{
    return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDepthToSpace(const DepthToSpaceQueueDescriptor& descriptor,
                                                                 const WorkloadInfo& info) const
{
    return MakeWorkload<ClDepthToSpaceWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDepthwiseConvolution2d(
    const DepthwiseConvolution2dQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return MakeWorkload<ClDepthwiseConvolutionWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDequantize(const DequantizeQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
{
    return MakeWorkload<ClDequantizeWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDetectionPostProcess(
    const DetectionPostProcessQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDivision(const DivisionQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    return std::make_unique<ClDivisionWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
                                                                     const WorkloadInfo& info) const
{
    switch(descriptor.m_Parameters.m_Operation)
    {
        case UnaryOperation::Abs:
        {
            AbsQueueDescriptor absQueueDescriptor;
            absQueueDescriptor.m_Inputs  = descriptor.m_Inputs;
            absQueueDescriptor.m_Outputs = descriptor.m_Outputs;

            return std::make_unique<ClAbsWorkload>(absQueueDescriptor, info, m_CLCompileContext);
        }
        case UnaryOperation::Exp:
            return std::make_unique<ClExpWorkload>(descriptor, info, m_CLCompileContext);
        case UnaryOperation::Log:
            return std::make_unique<ClLogWorkload>(descriptor, info, m_CLCompileContext);
        case UnaryOperation::LogicalNot:
            return std::make_unique<ClLogicalNotWorkload>(descriptor, info, m_CLCompileContext);
        case UnaryOperation::Neg:
            return std::make_unique<ClNegWorkload>(descriptor, info, m_CLCompileContext);
        case UnaryOperation::Rsqrt:
        {
            RsqrtQueueDescriptor rsqrtQueueDescriptor;
            rsqrtQueueDescriptor.m_Inputs  = descriptor.m_Inputs;
            rsqrtQueueDescriptor.m_Outputs = descriptor.m_Outputs;

            return std::make_unique<ClRsqrtWorkload>(rsqrtQueueDescriptor, info, m_CLCompileContext);
        }
        case UnaryOperation::Sin:
            return std::make_unique<ClSinWorkload>(descriptor, info, m_CLCompileContext);
        default:
            return nullptr;
    }
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateFill(const FillQueueDescriptor& descriptor,
                                                         const WorkloadInfo& info) const
{
    return std::make_unique<ClFillWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
{
    return MakeWorkload<ClFloorFloatWorkload, NullWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateFullyConnected(const FullyConnectedQueueDescriptor& descriptor,
                                                                   const WorkloadInfo& info) const
{
    return MakeWorkload<ClFullyConnectedWorkload>(descriptor,
                                                  info,
                                                  m_MemoryManager->GetIntraLayerManager(),
                                                  m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateGather(const GatherQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    return MakeWorkload<ClGatherWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateInput(const InputQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
{
    return std::make_unique<CopyMemGenericWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateInstanceNormalization(
    const InstanceNormalizationQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return MakeWorkload<ClInstanceNormalizationWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
                                                                    const WorkloadInfo& info) const
{
    return MakeWorkload<ClL2NormalizationFloatWorkload, NullWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateLogicalBinary(const LogicalBinaryQueueDescriptor& descriptor,
                                                                  const WorkloadInfo& info) const
{
    switch(descriptor.m_Parameters.m_Operation)
    {
        case LogicalBinaryOperation::LogicalAnd:
            return std::make_unique<ClLogicalAndWorkload>(descriptor, info, m_CLCompileContext);
        case LogicalBinaryOperation::LogicalOr:
            return std::make_unique<ClLogicalOrWorkload>(descriptor, info, m_CLCompileContext);
        default:
            return nullptr;
    }
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateLogSoftmax(const LogSoftmaxQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
{
    return MakeWorkload<ClLogSoftmaxWorkload>(descriptor,
                                              info,
                                              m_MemoryManager->GetIntraLayerManager(),
                                              m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateLstm(const LstmQueueDescriptor& descriptor,
                                                         const WorkloadInfo& info) const
{
    return MakeWorkload<ClLstmFloatWorkload, NullWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMaximum(const MaximumQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    return MakeWorkload<ClMaximumWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMean(const MeanQueueDescriptor& descriptor,
                                                         const WorkloadInfo& info) const
{
    return MakeWorkload<ClMeanWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    if (descriptor.m_Inputs.empty() || !descriptor.m_Inputs[0])
    {
        throw InvalidArgumentException("ClWorkloadFactory: Invalid null input for MemCopy workload");
    }

    return MakeWorkload<CopyMemGenericWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMemImport(const MemImportQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    if (descriptor.m_Inputs.empty() || !descriptor.m_Inputs[0])
    {
        throw InvalidArgumentException("ClWorkloadFactory: Invalid null input for MemImport workload");
    }

    return std::make_unique<ImportMemGenericWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    return MakeWorkload<ClMinimumWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMultiplication(const MultiplicationQueueDescriptor& descriptor,
                                                                   const WorkloadInfo& info) const
{
    return MakeWorkload<ClMultiplicationWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& descriptor,
                                                                  const WorkloadInfo& info) const
{
    return MakeWorkload<ClNormalizationFloatWorkload, NullWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateOutput(const OutputQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    return std::make_unique<CopyMemGenericWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreatePad(const PadQueueDescriptor& descriptor,
                                                        const WorkloadInfo& info) const
{
    return MakeWorkload<ClPadWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    return MakeWorkload<ClPermuteWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    return MakeWorkload<ClPooling2dWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
                                                                const WorkloadInfo& info) const
{
    return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreatePrelu(const PreluQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
{
    return MakeWorkload<ClPreluWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateQLstm(const QLstmQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
{
    return std::make_unique<ClQLstmWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    return MakeWorkload<ClQuantizeWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& descriptor,
                                                                  const WorkloadInfo& info) const
{
    return MakeWorkload<ClQuantizedLstmWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateRank(const RankQueueDescriptor& descriptor,
                                                         const WorkloadInfo& info) const
{
    return std::make_unique<ClRankWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateReduce(const ReduceQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    return std::make_unique<ClReduceWorkload>(descriptor, info);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    return MakeWorkload<ClReshapeWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateResize(const ResizeQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    return MakeWorkload<ClResizeWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSlice(const SliceQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
{
    return MakeWorkload<ClSliceWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    return std::make_unique<ClSoftmaxWorkload>(descriptor,
                                               info,
                                               m_MemoryManager->GetIntraLayerManager(),
                                               m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
                                                                   const WorkloadInfo& info) const
{
    return MakeWorkload<ClSpaceToBatchNdWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& descriptor,
                                                                 const WorkloadInfo& info) const
{
    return MakeWorkload<ClSpaceToDepthWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    return MakeWorkload<ClSplitterWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateStack(const StackQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
{
    return MakeWorkload<ClStackWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
                                                                 const WorkloadInfo& info) const
{
    return MakeWorkload<ClStridedSliceWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
                                                                const WorkloadInfo& info) const
{
    return MakeWorkload<ClSubtractionWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateTranspose(const TransposeQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    return MakeWorkload<ClTransposeWorkload>(descriptor, info, m_CLCompileContext);
}

std::unique_ptr<IWorkload> ClWorkloadFactory::CreateTransposeConvolution2d(
    const TransposeConvolution2dQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return MakeWorkload<ClTransposeConvolution2dWorkload>(descriptor,
                                                          info,
                                                          m_MemoryManager->GetIntraLayerManager(),
                                                          m_CLCompileContext);
}

} // namespace armnn