//
// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "ClWorkloadFactory.hpp"
#include "ClBackendId.hpp"
#include "ClBackendModelContext.hpp"
#include "ClContextDeserializer.hpp"
#include "ClContextSerializer.hpp"

#include <Layer.hpp>

#include <armnn/Exceptions.hpp>
#include <armnn/Logging.hpp>
#include <armnn/Utils.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#include <backendsCommon/MakeWorkloadHelper.hpp>
#include <armnn/backends/MemCopyWorkload.hpp>
#include <backendsCommon/MemImportWorkload.hpp>
#include <armnn/backends/TensorHandle.hpp>

#include <cl/ClTensorHandle.hpp>
#include <cl/workloads/ClWorkloads.hpp>
#include <cl/workloads/ClWorkloadUtils.hpp>

#include <arm_compute/core/CL/CLKernelLibrary.h>
#include <arm_compute/runtime/CL/CLBufferAllocator.h>
#include <arm_compute/runtime/CL/CLScheduler.h>

#include <armnnUtils/Filesystem.hpp>
#include <fstream>

#include <sys/stat.h>

namespace armnn
{

namespace
{
static const BackendId s_Id{ClBackendId()};
}

bool ClWorkloadFactory::IsLayerSupported(const Layer& layer,
                                         Optional<DataType> dataType,
                                         std::string& outReasonIfUnsupported)
{
    return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported);
}

bool ClWorkloadFactory::IsLayerSupported(const IConnectableLayer& layer,
                                         Optional<DataType> dataType,
                                         std::string& outReasonIfUnsupported,
                                         const ModelOptions& modelOptions)
{
    return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported, modelOptions);
}

const BackendId& ClWorkloadFactory::GetBackendId() const
{
    return s_Id;
}

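// Called once all workloads have been created. If the backend model options request it
// (SaveCachedNetwork), the compiled OpenCL programs held in m_CLCompileContext are serialized
// and written to the cached file descriptor and/or the cached network file path.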
void ClWorkloadFactory::AfterWorkloadsCreated()
{
    if(m_ModelContextPtr)
    {
        auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
        if (modelOptions->SaveCachedNetwork())
        {
            ClContextSerializer serializer;
            serializer.Serialize(m_CLCompileContext);
            auto cachedFd = modelOptions->GetCachedFileDescriptor();
            if (cachedFd != -1)
            {
                std::vector<uint8_t> compiledContextData;
                std::stringstream stream;
                bool serialized = serializer.SaveSerializedToStream(stream);
                if (serialized)
                {
                    std::string const serializedString{stream.str()};
                    std::copy(serializedString.begin(),
                              serializedString.end(),
                              std::back_inserter(compiledContextData));
                    auto success = write(cachedFd, compiledContextData.data(), compiledContextData.size());
                    if (success == -1)
                    {
                        ARMNN_LOG(info) << "ClWorkloadFactory:: Could not cache the compiled context!";
                    }
                }
            }

            // Save map to a filepath provided in ModelOptions
            auto filePath = modelOptions->GetCachedNetworkFilePath();
            if (filePath != "" && fs::exists(filePath) && fs::is_regular_file(filePath))
            {
                // Serialize ClContext to the file specified
                std::ofstream file(filePath, std::ios::out | std::ios::binary);
                serializer.SaveSerializedToStream(file);
            }
        }
    }
}

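// Helper templates: construct the requested workload and convert any cl::Error thrown during
// construction into an armnn exception that records the source location.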
template <typename FloatWorkload, typename Uint8Workload, typename QueueDescriptorType, typename... Args>
std::unique_ptr<IWorkload> ClWorkloadFactory::MakeWorkload(const QueueDescriptorType& descriptor,
                                                           const WorkloadInfo& info,
                                                           Args&&... args)
{
    try
    {
        return MakeWorkloadHelper<FloatWorkload, Uint8Workload>(descriptor, info, std::forward<Args>(args)...);
    }
    catch (const cl::Error& clError)
    {
        throw WrapClError(clError, CHECK_LOCATION());
    }
}

template <typename Workload, typename QueueDescriptorType, typename... Args>
std::unique_ptr<IWorkload> ClWorkloadFactory::MakeWorkload(const QueueDescriptorType& descriptor,
                                                           const WorkloadInfo& info,
                                                           Args&&... args)
{
    try
    {
        return std::make_unique<Workload>(descriptor, info, std::forward<Args>(args)...);
    }
    catch (const cl::Error& clError)
    {
        throw WrapClError(clError, CHECK_LOCATION());
    }
}

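// Builds m_CLCompileContext from the default OpenCL context and device. When a cached network
// is being loaded rather than saved, any previously serialized ClContext is restored from the
// cached file descriptor or from the cached network file path.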
void ClWorkloadFactory::InitializeCLCompileContext()
{
    // Initialize our m_CLCompileContext using default device and context
    auto context = arm_compute::CLKernelLibrary::get().context();
    auto device = arm_compute::CLKernelLibrary::get().get_device();
    m_CLCompileContext = arm_compute::CLCompileContext(context, device);

    if (m_ModelContextPtr)
    {
        // Load saved programs if the user has set a filepath
        auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
        auto filePath = modelOptions->GetCachedNetworkFilePath();
        if (!(modelOptions->SaveCachedNetwork()))
        {
            ClContextDeserializer deserializer;
            auto cachedFd = modelOptions->GetCachedFileDescriptor();
            if (cachedFd != -1)
            {
                struct stat statBuffer;
                if (fstat(cachedFd, &statBuffer) == 0)
                {
                    long dataSize = static_cast<long>(statBuffer.st_size);
                    if (dataSize > 0)
                    {
                        auto offset = lseek(cachedFd, 0, SEEK_CUR);
                        if (offset == 0)
                        {
                            std::vector<uint8_t> compiledContextData(static_cast<unsigned int>(dataSize));
                            auto success = pread(cachedFd, compiledContextData.data(), compiledContextData.size(), 0);
                            if (success != -1)
                            {
                                deserializer.DeserializeFromBinary(m_CLCompileContext,
                                                                   context,
                                                                   device,
                                                                   compiledContextData);
                            }
                        }
                    }
                }
            }

            if (filePath != "" && fs::exists(filePath) && fs::is_regular_file(filePath))
            {
                // Deserialize binary file and load into m_CLCompileContext
                deserializer.Deserialize(m_CLCompileContext, context, device, filePath);
            }
        }
    }
}

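// Both constructors initialize the CL compile context so that any cached programs are
// available before workloads are created.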
ClWorkloadFactory::ClWorkloadFactory(const std::shared_ptr<ClMemoryManager>& memoryManager)
    : m_MemoryManager(memoryManager), m_ModelContextPtr(IBackendInternal::IBackendSpecificModelContextPtr{})
{
    InitializeCLCompileContext();
}

ClWorkloadFactory::ClWorkloadFactory(const std::shared_ptr<ClMemoryManager>& memoryManager,
                                     const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr)
    : m_MemoryManager(memoryManager), m_ModelContextPtr(modelContextPtr)
{
    InitializeCLCompileContext();
}

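// Tensor handles are registered with the inter-layer memory group of the shared ClMemoryManager;
// the IsMemoryManaged flag is not used by this backend.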
std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                     const bool IsMemoryManaged) const
{
    IgnoreUnused(IsMemoryManaged);
    std::unique_ptr<ClTensorHandle> tensorHandle = std::make_unique<ClTensorHandle>(tensorInfo);
    tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());

    return tensorHandle;
}

std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                     DataLayout dataLayout,
                                                                     const bool IsMemoryManaged) const
{
    IgnoreUnused(IsMemoryManaged);
    std::unique_ptr<ClTensorHandle> tensorHandle = std::make_unique<ClTensorHandle>(tensorInfo, dataLayout);
    tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());

    return tensorHandle;
}

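// Creates a view onto an existing parent tensor. Returns nullptr if the requested shape and
// origin do not describe a valid sub-tensor of the parent.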
std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateSubTensorHandle(ITensorHandle& parent,
                                                                        TensorShape const& subTensorShape,
                                                                        unsigned int const* subTensorOrigin) const
{
    arm_compute::Coordinates coords;
    arm_compute::TensorShape shape = armcomputetensorutils::BuildArmComputeTensorShape(subTensorShape);

    coords.set_num_dimensions(subTensorShape.GetNumDimensions());
    for (unsigned int i = 0; i < subTensorShape.GetNumDimensions(); i++)
    {
        // Arm compute indexes tensor coords in reverse order.
        unsigned int revertedIndex = subTensorShape.GetNumDimensions() - i - 1;
        coords.set(i, armnn::numeric_cast<int>(subTensorOrigin[revertedIndex]));
    }

    const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape());
    if (!::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, parentShape, coords, shape))
    {
        return nullptr;
    }

    return std::make_unique<ClSubTensorHandle>(
        PolymorphicDowncast<IClTensorHandle*>(&parent), shape, coords);
}

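// Single factory entry point: dispatches on LayerType and builds the matching OpenCL workload.
// Layer types without a CL implementation return nullptr.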
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateWorkload(LayerType type,
                                                             const QueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    switch(type)
    {
        case LayerType::Activation :
        {
            auto activationQueueDescriptor = PolymorphicDowncast<const ActivationQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClActivationWorkload>(*activationQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Addition :
        {
            auto additionQueueDescriptor = PolymorphicDowncast<const AdditionQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClAdditionWorkload>(*additionQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::ArgMinMax :
        {
            auto argMinMaxQueueDescriptor = PolymorphicDowncast<const ArgMinMaxQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClArgMinMaxWorkload>(*argMinMaxQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::BatchMatMul :
        {
            auto batchMatMulQueueDescriptor = PolymorphicDowncast<const BatchMatMulQueueDescriptor*>(&descriptor);
            return std::make_unique<ClBatchMatMulWorkload>(*batchMatMulQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::BatchNormalization :
        {
            auto batchNormalizationQueueDescriptor
                = PolymorphicDowncast<const BatchNormalizationQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClBatchNormalizationFloatWorkload, NullWorkload>
                (*batchNormalizationQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::BatchToSpaceNd :
        {
            auto batchToSpaceNdQueueDescriptor
                = PolymorphicDowncast<const BatchToSpaceNdQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClBatchToSpaceNdWorkload>(*batchToSpaceNdQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Cast :
        {
            auto castQueueDescriptor = PolymorphicDowncast<const CastQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClCastWorkload>(*castQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::ChannelShuffle :
        {
            auto channelShuffleQueueDescriptor
                = PolymorphicDowncast<const ChannelShuffleQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClChannelShuffleWorkload>(*channelShuffleQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Comparison :
        {
            auto comparisonQueueDescriptor = PolymorphicDowncast<const ComparisonQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClComparisonWorkload>(*comparisonQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Concat :
        {
            auto concatQueueDescriptor = PolymorphicDowncast<const ConcatQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClConcatWorkload>(*concatQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Constant :
        {
            auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClConstantWorkload>(*constantQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::ConvertFp16ToFp32 :
        {
            auto convertFp16ToFp32QueueDescriptor
                = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
            return MakeWorkload<ClConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor,
                                                             info,
                                                             m_CLCompileContext);
        }
        case LayerType::ConvertFp32ToFp16 :
        {
            auto convertFp32ToFp16QueueDescriptor
                = PolymorphicDowncast<const ConvertFp32ToFp16QueueDescriptor*>(&descriptor);
            return MakeWorkload<ClConvertFp32ToFp16Workload>(*convertFp32ToFp16QueueDescriptor,
                                                             info,
                                                             m_CLCompileContext);
        }
        case LayerType::Convolution2d :
        {
            auto convolution2dQueueDescriptor = PolymorphicDowncast<const Convolution2dQueueDescriptor*>(&descriptor);
            bool isFastMathEnabled = false;
            if (m_ModelContextPtr)
            {
                if (m_ModelContextPtr.get() != nullptr)
                {
                    auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
                    if (modelOptions)
                    {
                        isFastMathEnabled = modelOptions->IsFastMathEnabled();
                    }
                }
            }
            return MakeWorkload<ClConvolution2dWorkload>(*convolution2dQueueDescriptor,
                                                         info,
                                                         m_MemoryManager->GetIntraLayerManager(),
                                                         m_CLCompileContext,
                                                         isFastMathEnabled);
        }
        case LayerType::Convolution3d :
        {
            auto convolution3dQueueDescriptor = PolymorphicDowncast<const Convolution3dQueueDescriptor*>(&descriptor);
            bool isFastMathEnabled = false;
            if (m_ModelContextPtr)
            {
                if (m_ModelContextPtr.get() != nullptr)
                {
                    auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
                    if (modelOptions)
                    {
                        isFastMathEnabled = modelOptions->IsFastMathEnabled();
                    }
                }
            }
            return MakeWorkload<ClConvolution3dWorkload>(*convolution3dQueueDescriptor,
                                                         info,
                                                         m_MemoryManager->GetIntraLayerManager(),
                                                         m_CLCompileContext,
                                                         isFastMathEnabled);
        }
        case LayerType::Debug :
        {
            auto debugQueueDescriptor = PolymorphicDowncast<const DebugQueueDescriptor*>(&descriptor);
            return MakeWorkload<NullWorkload, NullWorkload>(*debugQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::DepthToSpace :
        {
            auto depthToSpaceQueueDescriptor = PolymorphicDowncast<const DepthToSpaceQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClDepthToSpaceWorkload>(*depthToSpaceQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::DepthwiseConvolution2d :
        {
            auto depthwiseConvolution2dQueueDescriptor
                = PolymorphicDowncast<const DepthwiseConvolution2dQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClDepthwiseConvolutionWorkload>(*depthwiseConvolution2dQueueDescriptor,
                                                                info,
                                                                m_CLCompileContext);
        }
        case LayerType::Dequantize :
        {
            auto dequantizeQueueDescriptor = PolymorphicDowncast<const DequantizeQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClDequantizeWorkload>(*dequantizeQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::DetectionPostProcess :
        {
            auto detectionPostProcessQueueDescriptor
                = PolymorphicDowncast<const DetectionPostProcessQueueDescriptor*>(&descriptor);
            return MakeWorkload<NullWorkload, NullWorkload>(*detectionPostProcessQueueDescriptor,
                                                            info,
                                                            m_CLCompileContext);
        }
        case LayerType::Division :
        {
            auto divisionQueueDescriptor = PolymorphicDowncast<const DivisionQueueDescriptor*>(&descriptor);
            return std::make_unique<ClDivisionWorkload>(*divisionQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::ElementwiseBinary :
        {
            auto elementwiseBinaryQueueDescriptor
                = PolymorphicDowncast<const ElementwiseBinaryQueueDescriptor*>(&descriptor);
            switch (elementwiseBinaryQueueDescriptor->m_Parameters.m_Operation)
            {
                case BinaryOperation::Add:
                {
                    AdditionQueueDescriptor additionQueueDescriptor;
                    additionQueueDescriptor.m_Inputs = descriptor.m_Inputs;
                    additionQueueDescriptor.m_Outputs = descriptor.m_Outputs;
                    additionQueueDescriptor.m_AdditionalInfoObject =
                        elementwiseBinaryQueueDescriptor->m_AdditionalInfoObject;
                    return std::make_unique<ClAdditionWorkload>(additionQueueDescriptor, info, m_CLCompileContext);
                }
                case BinaryOperation::Div:
                {
                    DivisionQueueDescriptor divisionQueueDescriptor;
                    divisionQueueDescriptor.m_Inputs = descriptor.m_Inputs;
                    divisionQueueDescriptor.m_Outputs = descriptor.m_Outputs;
                    divisionQueueDescriptor.m_AdditionalInfoObject =
                        elementwiseBinaryQueueDescriptor->m_AdditionalInfoObject;
                    return std::make_unique<ClDivisionWorkload>(divisionQueueDescriptor, info, m_CLCompileContext);
                }
                case BinaryOperation::Maximum:
                {
                    MaximumQueueDescriptor maximumQueueDescriptor;
                    maximumQueueDescriptor.m_Inputs = descriptor.m_Inputs;
                    maximumQueueDescriptor.m_Outputs = descriptor.m_Outputs;
                    maximumQueueDescriptor.m_AdditionalInfoObject =
                        elementwiseBinaryQueueDescriptor->m_AdditionalInfoObject;
                    return std::make_unique<ClMaximumWorkload>(maximumQueueDescriptor, info, m_CLCompileContext);
                }
                case BinaryOperation::Minimum:
                {
                    MinimumQueueDescriptor minimumQueueDescriptor;
                    minimumQueueDescriptor.m_Inputs = descriptor.m_Inputs;
                    minimumQueueDescriptor.m_Outputs = descriptor.m_Outputs;
                    minimumQueueDescriptor.m_AdditionalInfoObject =
                        elementwiseBinaryQueueDescriptor->m_AdditionalInfoObject;
                    return std::make_unique<ClMinimumWorkload>(minimumQueueDescriptor, info, m_CLCompileContext);
                }
                case BinaryOperation::Mul:
                {
                    MultiplicationQueueDescriptor multiplicationQueueDescriptor;
                    multiplicationQueueDescriptor.m_Inputs = descriptor.m_Inputs;
                    multiplicationQueueDescriptor.m_Outputs = descriptor.m_Outputs;
                    multiplicationQueueDescriptor.m_AdditionalInfoObject =
                        elementwiseBinaryQueueDescriptor->m_AdditionalInfoObject;
                    return std::make_unique<ClMultiplicationWorkload>(multiplicationQueueDescriptor,
                                                                      info,
                                                                      m_CLCompileContext);
                }
                case BinaryOperation::Power:
                case BinaryOperation::SqDiff:
                {
                    return std::make_unique<ClElementwiseBinaryWorkload>(*elementwiseBinaryQueueDescriptor,
                                                                         info,
                                                                         m_CLCompileContext);
                }
                case BinaryOperation::Sub:
                {
                    SubtractionQueueDescriptor subtractionQueueDescriptor;
                    subtractionQueueDescriptor.m_Inputs = descriptor.m_Inputs;
                    subtractionQueueDescriptor.m_Outputs = descriptor.m_Outputs;
                    subtractionQueueDescriptor.m_AdditionalInfoObject =
                        elementwiseBinaryQueueDescriptor->m_AdditionalInfoObject;
                    return std::make_unique<ClSubtractionWorkload>(subtractionQueueDescriptor,
                                                                   info,
                                                                   m_CLCompileContext);
                }
                default:
                    return nullptr;
            }
        }
        case LayerType::ElementwiseUnary :
        {
            auto elementwiseUnaryQueueDescriptor
                = PolymorphicDowncast<const ElementwiseUnaryQueueDescriptor*>(&descriptor);
            switch(elementwiseUnaryQueueDescriptor->m_Parameters.m_Operation)
            {
                case UnaryOperation::Abs:
                {
                    AbsQueueDescriptor absQueueDescriptor;
                    absQueueDescriptor.m_Inputs = elementwiseUnaryQueueDescriptor->m_Inputs;
                    absQueueDescriptor.m_Outputs = elementwiseUnaryQueueDescriptor->m_Outputs;
                    return std::make_unique<ClAbsWorkload>(absQueueDescriptor, info, m_CLCompileContext);
                }
                case UnaryOperation::Exp:
                    return std::make_unique<ClExpWorkload>(*elementwiseUnaryQueueDescriptor, info, m_CLCompileContext);
                case UnaryOperation::Log:
                    return std::make_unique<ClLogWorkload>(*elementwiseUnaryQueueDescriptor, info, m_CLCompileContext);
                case UnaryOperation::LogicalNot:
                    return std::make_unique<ClLogicalNotWorkload>(*elementwiseUnaryQueueDescriptor,
                                                                  info,
                                                                  m_CLCompileContext);
                case UnaryOperation::Neg:
                    return std::make_unique<ClNegWorkload>(*elementwiseUnaryQueueDescriptor, info, m_CLCompileContext);
                case UnaryOperation::Rsqrt:
                {
                    RsqrtQueueDescriptor rsqrtQueueDescriptor;
                    rsqrtQueueDescriptor.m_Inputs = elementwiseUnaryQueueDescriptor->m_Inputs;
                    rsqrtQueueDescriptor.m_Outputs = elementwiseUnaryQueueDescriptor->m_Outputs;
                    return std::make_unique<ClRsqrtWorkload>(rsqrtQueueDescriptor, info, m_CLCompileContext);
                }
                case UnaryOperation::Sin:
                    return std::make_unique<ClSinWorkload>(*elementwiseUnaryQueueDescriptor, info, m_CLCompileContext);
                case UnaryOperation::Sqrt:
                    return std::make_unique<ClSqrtWorkload>(*elementwiseUnaryQueueDescriptor, info, m_CLCompileContext);
                default:
                    return nullptr;
            }
        }
        case LayerType::Fill :
        {
            auto fillQueueDescriptor = PolymorphicDowncast<const FillQueueDescriptor*>(&descriptor);
            return std::make_unique<ClFillWorkload>(*fillQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Floor :
        {
            auto floorQueueDescriptor = PolymorphicDowncast<const FloorQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClFloorFloatWorkload, NullWorkload>(*floorQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::FullyConnected :
        {
            auto fullyConnectedQueueDescriptor
                = PolymorphicDowncast<const FullyConnectedQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClFullyConnectedWorkload>(*fullyConnectedQueueDescriptor,
                                                          info,
                                                          m_MemoryManager->GetIntraLayerManager(),
                                                          m_CLCompileContext);
        }
        case LayerType::Gather :
        {
            auto gatherQueueDescriptor = PolymorphicDowncast<const GatherQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClGatherWorkload>(*gatherQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::GatherNd :
        {
            auto gatherNdQueueDescriptor = PolymorphicDowncast<const GatherNdQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClGatherNdWorkload>(*gatherNdQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Input :
        {
            auto inputQueueDescriptor = PolymorphicDowncast<const InputQueueDescriptor*>(&descriptor);
            return std::make_unique<CopyMemGenericWorkload>(*inputQueueDescriptor, info);
        }
        case LayerType::InstanceNormalization :
        {
            auto instanceNormalizationQueueDescriptor
                = PolymorphicDowncast<const InstanceNormalizationQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClInstanceNormalizationWorkload>(*instanceNormalizationQueueDescriptor,
                                                                 info,
                                                                 m_CLCompileContext);
        }
        case LayerType::L2Normalization :
        {
            auto l2NormalizationQueueDescriptor
                = PolymorphicDowncast<const L2NormalizationQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClL2NormalizationFloatWorkload, NullWorkload>(*l2NormalizationQueueDescriptor,
                                                                              info,
                                                                              m_CLCompileContext);
        }
        case LayerType::LogicalBinary :
        {
            auto logicalBinaryQueueDescriptor = PolymorphicDowncast<const LogicalBinaryQueueDescriptor*>(&descriptor);
            switch(logicalBinaryQueueDescriptor->m_Parameters.m_Operation)
            {
                case LogicalBinaryOperation::LogicalAnd:
                    return std::make_unique<ClLogicalAndWorkload>(*logicalBinaryQueueDescriptor,
                                                                  info,
                                                                  m_CLCompileContext);
                case LogicalBinaryOperation::LogicalOr:
                    return std::make_unique<ClLogicalOrWorkload>(*logicalBinaryQueueDescriptor,
                                                                 info,
                                                                 m_CLCompileContext);
                default:
                    return nullptr;
            }
        }
        case LayerType::LogSoftmax :
        {
            auto logSoftmaxQueueDescriptor = PolymorphicDowncast<const LogSoftmaxQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClLogSoftmaxWorkload>(*logSoftmaxQueueDescriptor,
                                                      info,
                                                      m_MemoryManager->GetIntraLayerManager(),
                                                      m_CLCompileContext);
        }
        case LayerType::Lstm :
        {
            auto lstmQueueDescriptor = PolymorphicDowncast<const LstmQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClLstmFloatWorkload, NullWorkload>(*lstmQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Maximum :
        {
            auto maximumQueueDescriptor = PolymorphicDowncast<const MaximumQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClMaximumWorkload>(*maximumQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Mean :
        {
            auto meanQueueDescriptor = PolymorphicDowncast<const MeanQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClMeanWorkload>(*meanQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::MemCopy :
        {
            auto memCopyQueueDescriptor = PolymorphicDowncast<const MemCopyQueueDescriptor*>(&descriptor);
            if (memCopyQueueDescriptor->m_Inputs.empty() || !memCopyQueueDescriptor->m_Inputs[0])
            {
                throw InvalidArgumentException("ClWorkloadFactory: Invalid null input for MemCopy workload");
            }
            return MakeWorkload<CopyMemGenericWorkload>(*memCopyQueueDescriptor, info);
        }
        case LayerType::MemImport :
        {
            auto memImportQueueDescriptor = PolymorphicDowncast<const MemImportQueueDescriptor*>(&descriptor);
            if (memImportQueueDescriptor->m_Inputs.empty() || !memImportQueueDescriptor->m_Inputs[0])
            {
                throw InvalidArgumentException("ClWorkloadFactory: Invalid null input for MemImport workload");
            }
            return std::make_unique<ImportMemGenericWorkload>(*memImportQueueDescriptor, info);
        }
        case LayerType::Minimum :
        {
            auto minimumQueueDescriptor = PolymorphicDowncast<const MinimumQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClMinimumWorkload>(*minimumQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Multiplication :
        {
            auto multiplicationQueueDescriptor = PolymorphicDowncast<const MultiplicationQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClMultiplicationWorkload>(*multiplicationQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Normalization :
        {
            auto normalizationQueueDescriptor = PolymorphicDowncast<const NormalizationQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClNormalizationFloatWorkload, NullWorkload>(*normalizationQueueDescriptor,
                                                                            info,
                                                                            m_CLCompileContext);
        }
        case LayerType::Output :
        {
            auto outputQueueDescriptor = PolymorphicDowncast<const OutputQueueDescriptor*>(&descriptor);
            return std::make_unique<CopyMemGenericWorkload>(*outputQueueDescriptor, info);
        }
        case LayerType::Pad :
        {
            auto padQueueDescriptor = PolymorphicDowncast<const PadQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClPadWorkload>(*padQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Permute :
        {
            auto permuteQueueDescriptor = PolymorphicDowncast<const PermuteQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClPermuteWorkload>(*permuteQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Pooling2d :
        {
            auto pooling2dQueueDescriptor = PolymorphicDowncast<const Pooling2dQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClPooling2dWorkload>(*pooling2dQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Pooling3d :
        {
            auto pooling3dQueueDescriptor = PolymorphicDowncast<const Pooling3dQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClPooling3dWorkload>(*pooling3dQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::PreCompiled :
        {
            auto preCompiledQueueDescriptor = PolymorphicDowncast<const PreCompiledQueueDescriptor*>(&descriptor);
            return MakeWorkload<NullWorkload, NullWorkload>(*preCompiledQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Prelu :
        {
            auto preluQueueDescriptor = PolymorphicDowncast<const PreluQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClPreluWorkload>(*preluQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::QLstm :
        {
            auto qLstmQueueDescriptor = PolymorphicDowncast<const QLstmQueueDescriptor*>(&descriptor);
            return std::make_unique<ClQLstmWorkload>(*qLstmQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Quantize :
        {
            auto quantizeQueueDescriptor = PolymorphicDowncast<const QuantizeQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClQuantizeWorkload>(*quantizeQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::QuantizedLstm :
        {
            auto quantizedLstmQueueDescriptor = PolymorphicDowncast<const QuantizedLstmQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClQuantizedLstmWorkload>(*quantizedLstmQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Rank :
        {
            auto rankQueueDescriptor = PolymorphicDowncast<const RankQueueDescriptor*>(&descriptor);
            return std::make_unique<ClRankWorkload>(*rankQueueDescriptor, info);
        }
        case LayerType::Reduce :
        {
            auto reduceQueueDescriptor = PolymorphicDowncast<const ReduceQueueDescriptor*>(&descriptor);
            return std::make_unique<ClReduceWorkload>(*reduceQueueDescriptor, info);
        }
        case LayerType::Reshape :
        {
            auto reshapeQueueDescriptor = PolymorphicDowncast<const ReshapeQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClReshapeWorkload>(*reshapeQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Resize :
        {
            auto resizeQueueDescriptor = PolymorphicDowncast<const ResizeQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClResizeWorkload>(*resizeQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Slice :
        {
            auto sliceQueueDescriptor = PolymorphicDowncast<const SliceQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClSliceWorkload>(*sliceQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Softmax :
        {
            auto softmaxQueueDescriptor = PolymorphicDowncast<const SoftmaxQueueDescriptor*>(&descriptor);
            return std::make_unique<ClSoftmaxWorkload>(*softmaxQueueDescriptor,
                                                       info,
                                                       m_MemoryManager->GetIntraLayerManager(),
                                                       m_CLCompileContext);
        }
        case LayerType::SpaceToBatchNd :
        {
            auto spaceToBatchNdQueueDescriptor
                = PolymorphicDowncast<const SpaceToBatchNdQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClSpaceToBatchNdWorkload>(*spaceToBatchNdQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::SpaceToDepth :
        {
            auto spaceToDepthQueueDescriptor = PolymorphicDowncast<const SpaceToDepthQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClSpaceToDepthWorkload>(*spaceToDepthQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Splitter :
        {
            auto splitterQueueDescriptor = PolymorphicDowncast<const SplitterQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClSplitterWorkload>(*splitterQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Stack :
        {
            auto stackQueueDescriptor = PolymorphicDowncast<const StackQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClStackWorkload>(*stackQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::StridedSlice :
        {
            auto stridedSliceQueueDescriptor = PolymorphicDowncast<const StridedSliceQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClStridedSliceWorkload>(*stridedSliceQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Subtraction :
        {
            auto subtractionQueueDescriptor = PolymorphicDowncast<const SubtractionQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClSubtractionWorkload>(*subtractionQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Tile:
        {
            auto tileQueueDescriptor = PolymorphicDowncast<const TileQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClTileWorkload>(*tileQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::Transpose :
        {
            auto transposeQueueDescriptor = PolymorphicDowncast<const TransposeQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClTransposeWorkload>(*transposeQueueDescriptor, info, m_CLCompileContext);
        }
        case LayerType::TransposeConvolution2d :
        {
            auto transposeConvolution2dQueueDescriptor
                = PolymorphicDowncast<const TransposeConvolution2dQueueDescriptor*>(&descriptor);
            return MakeWorkload<ClTransposeConvolution2dWorkload>(*transposeConvolution2dQueueDescriptor,
                                                                  info,
                                                                  m_MemoryManager->GetIntraLayerManager(),
                                                                  m_CLCompileContext);
        }
        case LayerType::UnidirectionalSequenceLstm :
        {
            auto desc = PolymorphicDowncast<const UnidirectionalSequenceLstmQueueDescriptor*>(&descriptor);
            return MakeWorkloadHelper<ClUnidirectionalSequenceLstmFloatWorkload, NullWorkload>(*desc,
                                                                                               info,
                                                                                               m_CLCompileContext);
        }
        default:
            return nullptr;
    }
}

} // namespace armnn