//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonBackend.hpp"
#include "NeonBackendId.hpp"
#include "NeonBackendModelContext.hpp"
#include "NeonWorkloadFactory.hpp"
#include "NeonLayerSupport.hpp"
#include "NeonTensorHandleFactory.hpp"

#include <armnn/BackendRegistry.hpp>
#include <armnn/Descriptors.hpp>

#include <aclCommon/ArmComputeSubgraphUtils.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/BaseMemoryManager.hpp>

#include <armnn/backends/IBackendContext.hpp>
#include <armnn/backends/IMemoryManager.hpp>

#include <armnn/utility/PolymorphicDowncast.hpp>

#include "workloads/NeonAdditionWorkload.hpp"
#include "workloads/NeonBatchNormalizationWorkload.hpp"
#include "workloads/NeonConvolution2dWorkload.hpp"
#include "workloads/NeonDepthwiseConvolutionWorkload.hpp"
#include "workloads/NeonDivisionWorkload.hpp"
#include "workloads/NeonFullyConnectedWorkload.hpp"
#include "workloads/NeonMultiplicationWorkload.hpp"
#include "workloads/NeonReduceWorkload.hpp"
#include "workloads/NeonSubtractionWorkload.hpp"

#include <Optimizer.hpp>

#include <arm_compute/core/Types.h>
#include <arm_compute/runtime/Allocator.h>

namespace armnn
{

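// Returns the statically allocated identifier for this backend; NeonBackendId()
// expands to the well-known "CpuAcc" id, which selects Arm Compute Library NEON
// kernels running on the CPU.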
const BackendId& NeonBackend::GetIdStatic()
{
    static const BackendId s_Id{NeonBackendId()};
    return s_Id;
}

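// Creates the backend's memory manager, backed by Arm Compute's CPU allocator
// and using offset-based memory affinity.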
IBackendInternal::IMemoryManagerUniquePtr NeonBackend::CreateMemoryManager() const
{
    return std::make_unique<NeonMemoryManager>(std::make_unique<arm_compute::Allocator>(),
                                               BaseMemoryManager::MemoryAffinity::Offset);
}

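// The four CreateWorkloadFactory overloads below differ only in whether the caller
// supplies an existing memory manager or a TensorHandleFactoryRegistry, and in
// whether backend-specific ModelOptions are threaded through to the factory.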
IBackendInternal::IWorkloadFactoryPtr NeonBackend::CreateWorkloadFactory(
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) const
{
    return std::make_unique<NeonWorkloadFactory>(
        PolymorphicPointerDowncast<NeonMemoryManager>(memoryManager));
}

IBackendInternal::IWorkloadFactoryPtr NeonBackend::CreateWorkloadFactory(
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const ModelOptions& modelOptions) const
{
    return std::make_unique<NeonWorkloadFactory>(
        PolymorphicPointerDowncast<NeonMemoryManager>(memoryManager), CreateBackendSpecificModelContext(modelOptions));
}

IBackendInternal::IWorkloadFactoryPtr NeonBackend::CreateWorkloadFactory(
    class TensorHandleFactoryRegistry& tensorHandleFactoryRegistry) const
{
    auto memoryManager = std::make_shared<NeonMemoryManager>(std::make_unique<arm_compute::Allocator>(),
                                                             BaseMemoryManager::MemoryAffinity::Offset);

    tensorHandleFactoryRegistry.RegisterMemoryManager(memoryManager);
    tensorHandleFactoryRegistry.RegisterFactory(std::make_unique<NeonTensorHandleFactory>(memoryManager));

    return std::make_unique<NeonWorkloadFactory>(
        PolymorphicPointerDowncast<NeonMemoryManager>(memoryManager));
}

IBackendInternal::IWorkloadFactoryPtr NeonBackend::CreateWorkloadFactory(
    TensorHandleFactoryRegistry& tensorHandleFactoryRegistry, const ModelOptions& modelOptions) const
{
    auto memoryManager = std::make_shared<NeonMemoryManager>(std::make_unique<arm_compute::Allocator>(),
                                                             BaseMemoryManager::MemoryAffinity::Offset);

    tensorHandleFactoryRegistry.RegisterMemoryManager(memoryManager);
    tensorHandleFactoryRegistry.RegisterFactory(std::make_unique<NeonTensorHandleFactory>(memoryManager));

    return std::make_unique<NeonWorkloadFactory>(
        PolymorphicPointerDowncast<NeonMemoryManager>(memoryManager), CreateBackendSpecificModelContext(modelOptions));
}

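// The Neon backend requires no backend or profiling context, so empty pointers
// are returned.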
IBackendInternal::IBackendContextPtr NeonBackend::CreateBackendContext(const IRuntime::CreationOptions&) const
{
    return IBackendContextPtr{};
}

IBackendInternal::IBackendProfilingContextPtr NeonBackend::CreateBackendProfilingContext(
    const IRuntime::CreationOptions&, IBackendProfilingPtr&)
{
    return IBackendProfilingContextPtr{};
}

IBackendInternal::Optimizations NeonBackend::GetOptimizations() const
{
    return Optimizations{};
}

IBackendInternal::IBackendSpecificModelContextPtr NeonBackend::CreateBackendSpecificModelContext(
    const ModelOptions& modelOptions) const
{
    return IBackendSpecificModelContextPtr{new NeonBackendModelContext{modelOptions}};
}

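// Layer support queries are answered by a NeonLayerSupport instance cached in a
// function-local static.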
IBackendInternal::ILayerSupportSharedPtr NeonBackend::GetLayerSupport() const
{
    static ILayerSupportSharedPtr layerSupport
    {
        new NeonLayerSupport(IBackendInternal::IBackendSpecificModelContextPtr{})
    };
    return layerSupport;
}

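// Note that because layerSupport is a function-local static it is constructed from
// the ModelOptions of the first call only; subsequent calls with different options
// receive the same cached instance.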
IBackendInternal::ILayerSupportSharedPtr NeonBackend::GetLayerSupport(const ModelOptions& modelOptions) const
{
    static ILayerSupportSharedPtr layerSupport
    {
        new NeonLayerSupport(CreateBackendSpecificModelContext(modelOptions))
    };
    return layerSupport;
}

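// A capability is supported exactly when it appears in the backend's
// cpuAccCapabilities set.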
bool NeonBackend::HasCapability(BackendCapability capabilityClass) const
{
    return cpuAccCapabilities.find(capabilityClass) != cpuAccCapabilities.end();
}

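// Walks the subgraph backwards and applies two Neon-specific graph optimizations:
//   1. Fusing a directly following Activation layer into a supported convolution,
//      batch normalization, fully connected or elementwise arithmetic layer,
//      provided the corresponding Arm Compute validator accepts the combination.
//   2. Splitting a Reduce layer with multiple axes into a chain of single-axis
//      Reduce layers.
// Layers left unmodified are reported back as an untouched subgraph.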
OptimizationViews NeonBackend::OptimizeSubgraphView(const SubgraphView& subgraph) const
{
    OptimizationViews optimizationViews;

    auto it = subgraph.end();
    std::map<LayerGuid, Layer*> untouched;

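    // Record every layer in the subgraph up front; entries are erased as layers
    // are fused or replaced, so whatever remains is reported as untouched below.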
    while (it != subgraph.begin())
    {
        --it;
        Layer& base = **it;
        untouched.insert({base.GetGuid(), &base});
    }

    it = subgraph.end();
    while (it != subgraph.begin())
    {
        --it;
        Layer& base = **it;

        // Fuse a directly following Activation layer into this layer, if the backend supports the fusion
        if ((base.GetType() == LayerType::DepthwiseConvolution2d || base.GetType() == LayerType::Convolution2d
             || base.GetType() == LayerType::BatchNormalization || base.GetType() == LayerType::FullyConnected
             || base.GetType() == LayerType::Addition || base.GetType() == LayerType::Multiplication
             || base.GetType() == LayerType::Subtraction || base.GetType() == LayerType::Division)
            && (base.GetAdditionalInformation<ActivationDescriptor>() == nullptr))
        {
            for (auto output = base.BeginOutputSlots(); output != base.EndOutputSlots(); ++output)
            {
                if (output->GetNumConnections() == 1)
                {
                    for (auto&& childInput : output->GetConnections())
                    {
                        if ((childInput->GetOwningLayer().GetType() == LayerType::Activation) &&
                            (checkDataTypeInputandOutput(childInput->GetOwningLayer())))
                        {
                            Layer& child = childInput->GetOwningLayer();

                            auto* activationLayer = PolymorphicDowncast<ActivationLayer*>(&child);

                            const std::string name = std::string("fused-") + child.GetName() + std::string("-into-") +
                                                     base.GetName();

                            // Get params from activation layer
                            ActivationDescriptor activationDesc = activationLayer->GetParameters();

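                            // Validate the would-be fused layer against the Arm Compute
                            // library before substituting; the fusion is only applied when
                            // the corresponding workload validator accepts it.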
                            if (base.GetType() == LayerType::Convolution2d)
                            {
                                Convolution2dLayer* baseLayer = PolymorphicDowncast<Convolution2dLayer*>(&base);

                                Optional<TensorInfo> biases;

                                if (baseLayer->GetParameters().m_BiasEnabled)
                                {
                                    biases = baseLayer->m_Bias->GetTensorInfo();
                                }

                                arm_compute::Status status = NeonConvolution2dWorkloadValidate(
                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        baseLayer->GetParameters(),
                                        baseLayer->m_Weight->GetTensorInfo(),
                                        biases,
                                        false,
                                        &activationDesc);

                                if (status)
                                {
                                    FuseLayerWithWeightsAndBiases<Convolution2dLayer>(optimizationViews,
                                                                                      baseLayer,
                                                                                      activationLayer,
                                                                                      activationDesc,
                                                                                      name);
                                    untouched.erase(baseLayer->GetGuid());
                                    untouched.erase(activationLayer->GetGuid());
                                }
                            }
                            else if (base.GetType() == LayerType::DepthwiseConvolution2d)
                            {
                                DepthwiseConvolution2dLayer* baseLayer =
                                        PolymorphicDowncast<DepthwiseConvolution2dLayer*>(&base);

                                Optional<TensorInfo> biases;

                                if (baseLayer->GetParameters().m_BiasEnabled)
                                {
                                    biases = baseLayer->m_Bias->GetTensorInfo();
                                }

                                arm_compute::Status status = NeonDepthwiseConvolutionWorkloadValidate(
                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        baseLayer->GetParameters(),
                                        baseLayer->m_Weight->GetTensorInfo(),
                                        biases,
                                        &activationDesc);

                                if (status)
                                {
                                    FuseLayerWithWeightsAndBiases<DepthwiseConvolution2dLayer>(optimizationViews,
                                                                                               baseLayer,
                                                                                               activationLayer,
                                                                                               activationDesc,
                                                                                               name);
                                    untouched.erase(baseLayer->GetGuid());
                                    untouched.erase(activationLayer->GetGuid());
                                }
                            }
                            else if (base.GetType() == LayerType::FullyConnected)
                            {
                                FullyConnectedLayer* baseLayer = PolymorphicDowncast<FullyConnectedLayer*>(&base);

                                arm_compute::Status status = NeonFullyConnectedWorkloadValidate(
                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        baseLayer->m_Weight->GetTensorInfo(),
                                        baseLayer->m_Bias->GetTensorInfo(),
                                        baseLayer->GetParameters(),
                                        &activationDesc);

                                if (status)
                                {
                                    FuseLayerWithWeightsAndBiases<FullyConnectedLayer>(optimizationViews,
                                                                                       baseLayer,
                                                                                       activationLayer,
                                                                                       activationDesc,
                                                                                       name);
                                    untouched.erase(baseLayer->GetGuid());
                                    untouched.erase(activationLayer->GetGuid());
                                }
                            }
                            else if (base.GetType() == LayerType::BatchNormalization)
                            {
                                BatchNormalizationLayer* baseLayer =
                                        PolymorphicDowncast<BatchNormalizationLayer*>(&base);

                                arm_compute::Status status = NeonBatchNormalizationValidate(
                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        baseLayer->m_Mean->GetTensorInfo(),
                                        baseLayer->m_Variance->GetTensorInfo(),
                                        baseLayer->m_Beta->GetTensorInfo(),
                                        baseLayer->m_Gamma->GetTensorInfo(),
                                        baseLayer->GetParameters(),
                                        &activationDesc);

                                if (status)
                                {
                                    BatchNormalizationLayer* replacementLayer =
                                            FuseLayerWithParameters<BatchNormalizationLayer>(optimizationViews,
                                                                                             baseLayer,
                                                                                             activationLayer,
                                                                                             activationDesc,
                                                                                             name);

                                    replacementLayer->m_Beta = std::move(baseLayer->m_Beta);
                                    replacementLayer->m_Gamma = std::move(baseLayer->m_Gamma);
                                    replacementLayer->m_Mean = std::move(baseLayer->m_Mean);
                                    replacementLayer->m_Variance = std::move(baseLayer->m_Variance);
                                    untouched.erase(baseLayer->GetGuid());
                                    untouched.erase(activationLayer->GetGuid());
                                }
                            }
                            else if (base.GetType() == LayerType::Addition)
                            {
                                AdditionLayer* baseLayer = PolymorphicDowncast<AdditionLayer*>(&base);

                                arm_compute::Status status = NeonAdditionWorkloadValidate(
                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        &activationDesc);

                                if (status)
                                {
                                    FuseLayerWithoutParameters<AdditionLayer>(optimizationViews,
                                                                              baseLayer,
                                                                              activationLayer,
                                                                              activationDesc,
                                                                              name);
                                    untouched.erase(baseLayer->GetGuid());
                                    untouched.erase(activationLayer->GetGuid());
                                }
                            }
                            else if (base.GetType() == LayerType::Division)
                            {
                                DivisionLayer* baseLayer = PolymorphicDowncast<DivisionLayer*>(&base);

                                arm_compute::Status status = NeonDivisionWorkloadValidate(
                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        &activationDesc);

                                if (status)
                                {
                                    FuseLayerWithoutParameters<DivisionLayer>(optimizationViews,
                                                                              baseLayer,
                                                                              activationLayer,
                                                                              activationDesc,
                                                                              name);
                                    untouched.erase(baseLayer->GetGuid());
                                    untouched.erase(activationLayer->GetGuid());
                                }
                            }
                            else if (base.GetType() == LayerType::Multiplication)
                            {
                                MultiplicationLayer* baseLayer = PolymorphicDowncast<MultiplicationLayer*>(&base);

                                arm_compute::Status status = NeonMultiplicationWorkloadValidate(
                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        &activationDesc);

                                if (status)
                                {
                                    FuseLayerWithoutParameters<MultiplicationLayer>(optimizationViews,
                                                                                    baseLayer,
                                                                                    activationLayer,
                                                                                    activationDesc,
                                                                                    name);
                                    untouched.erase(baseLayer->GetGuid());
                                    untouched.erase(activationLayer->GetGuid());
                                }
                            }
                            else if (base.GetType() == LayerType::Subtraction)
                            {
                                SubtractionLayer* baseLayer = PolymorphicDowncast<SubtractionLayer*>(&base);

                                arm_compute::Status status = NeonSubtractionWorkloadValidate(
                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        &activationDesc);

                                if (status)
                                {
                                    FuseLayerWithoutParameters<SubtractionLayer>(optimizationViews,
                                                                                 baseLayer,
                                                                                 activationLayer,
                                                                                 activationDesc,
                                                                                 name);
                                    untouched.erase(baseLayer->GetGuid());
                                    untouched.erase(activationLayer->GetGuid());
                                }
                            }
                        }
                    }
                }
            }
        }

        // Split a Reduce layer that reduces over multiple axes into a chain of Reduce layers
        // that each reduce over a single axis.
        if (base.GetType() == LayerType::Reduce)
        {
            ReduceLayer* baseLayer = PolymorphicDowncast<ReduceLayer*>(&base);
            ReduceDescriptor reduceDescriptor = baseLayer->GetParameters();

            if (!reduceDescriptor.m_vAxis.empty() && reduceDescriptor.m_vAxis.size() > 1)
            {
                // Add new layers to the graph and connect them.
                std::vector<Layer*> layers = ChainReduceLayers<ReduceLayer>(optimizationViews,
                                                                            baseLayer,
                                                                            reduceDescriptor);

                // Replace the existing base layer with the new subgraph.
                ReplaceLayers<ReduceLayer>(optimizationViews, baseLayer, layers);
                untouched.erase(baseLayer->GetGuid());
            }
        }
    }

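    // Hand the whole subgraph back untouched if no substitutions were made;
    // otherwise report the layers that were left unmodified.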
    if (optimizationViews.GetSubstitutions().empty())
    {
        optimizationViews.AddUntouchedSubgraph(SubgraphView(subgraph));
    }
    else
    {
        ReportUntouchedLayers(optimizationViews, untouched);
    }

    return optimizationViews;
}

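// The Neon backend exposes a single tensor handle factory.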
std::vector<ITensorHandleFactory::FactoryId> NeonBackend::GetHandleFactoryPreferences() const
{
    return std::vector<ITensorHandleFactory::FactoryId>{ NeonTensorHandleFactory::GetIdStatic() };
}

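// Registers this backend's memory manager and tensor handle factory with the
// given registry.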
void NeonBackend::RegisterTensorHandleFactories(class TensorHandleFactoryRegistry& registry)
{
    auto memoryManager = std::make_shared<NeonMemoryManager>(std::make_unique<arm_compute::Allocator>(),
                                                             BaseMemoryManager::MemoryAffinity::Offset);

    registry.RegisterMemoryManager(memoryManager);
    registry.RegisterFactory(std::make_unique<NeonTensorHandleFactory>(memoryManager));
}

} // namespace armnn