blob: 7089f23efab796105daa447cd59986218c80d9a3 [file] [log] [blame]
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
5
6#include "NeonBackend.hpp"
David Beck3e9e1152018-10-17 14:17:50 +01007#include "NeonBackendId.hpp"
Sadik Armagan045f6be2020-09-10 13:37:32 +01008#include "NeonBackendModelContext.hpp"
arovir01a0944792018-10-11 15:00:58 +01009#include "NeonWorkloadFactory.hpp"
David Beck111b5d92018-11-12 14:59:37 +000010#include "NeonLayerSupport.hpp"
Narumol Prangnawarat4e3e8182019-08-14 12:25:50 +010011#include "NeonTensorHandleFactory.hpp"
arovir01a0944792018-10-11 15:00:58 +010012
Matteo Martincighc601aa62019-10-29 15:03:22 +000013#include <armnn/BackendRegistry.hpp>
Mike Kelly07810fc2020-11-12 10:58:48 +000014#include <armnn/Descriptors.hpp>
Matteo Martincighc601aa62019-10-29 15:03:22 +000015
Mike Kelly07810fc2020-11-12 10:58:48 +000016#include <aclCommon/ArmComputeSubgraphUtils.hpp>
17#include <aclCommon/ArmComputeUtils.hpp>
Aron Virginas-Tar56055192018-11-12 18:10:43 +000018#include <aclCommon/BaseMemoryManager.hpp>
19
Matteo Martincighe5b8eb92019-11-28 15:45:42 +000020#include <armnn/backends/IBackendContext.hpp>
21#include <armnn/backends/IMemoryManager.hpp>
Aron Virginas-Tar56055192018-11-12 18:10:43 +000022
Jan Eilers3c9e0452020-04-10 13:00:44 +010023#include <armnn/utility/PolymorphicDowncast.hpp>
24
Francis Murtaghe8d7ccb2021-10-14 17:30:24 +010025#include <neon/workloads/NeonAdditionWorkload.hpp>
26#include <neon/workloads/NeonBatchNormalizationWorkload.hpp>
27#include <neon/workloads/NeonConvolution2dWorkload.hpp>
28#include <neon/workloads/NeonDepthwiseConvolutionWorkload.hpp>
29#include <neon/workloads/NeonDivisionWorkload.hpp>
30#include <neon/workloads/NeonFullyConnectedWorkload.hpp>
31#include <neon/workloads/NeonMultiplicationWorkload.hpp>
32#include <neon/workloads/NeonReduceWorkload.hpp>
33#include <neon/workloads/NeonSubtractionWorkload.hpp>
34#include <backendsCommon/DefaultAllocator.hpp>
Mike Kelly07810fc2020-11-12 10:58:48 +000035
David Beck263e3492018-11-09 14:46:40 +000036#include <Optimizer.hpp>
arovir01a0944792018-10-11 15:00:58 +010037
Mike Kelly07810fc2020-11-12 10:58:48 +000038#include <arm_compute/core/Types.h>
Aron Virginas-Tar56055192018-11-12 18:10:43 +000039#include <arm_compute/runtime/Allocator.h>
40
arovir014424b0a2018-10-04 10:46:04 +010041namespace armnn
42{
43
David Beck3cc9a622018-10-12 10:38:31 +010044const BackendId& NeonBackend::GetIdStatic()
arovir014424b0a2018-10-04 10:46:04 +010045{
David Beck3e9e1152018-10-17 14:17:50 +010046 static const BackendId s_Id{NeonBackendId()};
arovir014424b0a2018-10-04 10:46:04 +010047 return s_Id;
48}
49
Aron Virginas-Tar56055192018-11-12 18:10:43 +000050IBackendInternal::IMemoryManagerUniquePtr NeonBackend::CreateMemoryManager() const
arovir014424b0a2018-10-04 10:46:04 +010051{
Aron Virginas-Tar56055192018-11-12 18:10:43 +000052 return std::make_unique<NeonMemoryManager>(std::make_unique<arm_compute::Allocator>(),
Sadik Armagan13a9fa62019-04-26 16:04:34 +010053 BaseMemoryManager::MemoryAffinity::Offset);
Aron Virginas-Tar56055192018-11-12 18:10:43 +000054}
55
56IBackendInternal::IWorkloadFactoryPtr NeonBackend::CreateWorkloadFactory(
57 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) const
58{
59 return std::make_unique<NeonWorkloadFactory>(
Jan Eilers3c9e0452020-04-10 13:00:44 +010060 PolymorphicPointerDowncast<NeonMemoryManager>(memoryManager));
arovir014424b0a2018-10-04 10:46:04 +010061}
62
Narumol Prangnawarat4e3e8182019-08-14 12:25:50 +010063IBackendInternal::IWorkloadFactoryPtr NeonBackend::CreateWorkloadFactory(
Sadik Armagan04a72972020-09-14 15:44:18 +010064 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const ModelOptions& modelOptions) const
65{
66 return std::make_unique<NeonWorkloadFactory>(
67 PolymorphicPointerDowncast<NeonMemoryManager>(memoryManager), CreateBackendSpecificModelContext(modelOptions));
68}
69
70IBackendInternal::IWorkloadFactoryPtr NeonBackend::CreateWorkloadFactory(
Narumol Prangnawarat4e3e8182019-08-14 12:25:50 +010071 class TensorHandleFactoryRegistry& tensorHandleFactoryRegistry) const
72{
73 auto memoryManager = std::make_shared<NeonMemoryManager>(std::make_unique<arm_compute::Allocator>(),
74 BaseMemoryManager::MemoryAffinity::Offset);
75
76 tensorHandleFactoryRegistry.RegisterMemoryManager(memoryManager);
Narumol Prangnawarat77400452022-01-13 17:43:41 +000077
78 auto factory = std::make_unique<NeonTensorHandleFactory>(memoryManager);
79 // Register copy and import factory pair
80 tensorHandleFactoryRegistry.RegisterCopyAndImportFactoryPair(factory->GetId(), factory->GetId());
81 // Register the factory
82 tensorHandleFactoryRegistry.RegisterFactory(std::move(factory));
83
Narumol Prangnawarat549cb7a2020-07-10 17:50:53 +010084
Narumol Prangnawarat4e3e8182019-08-14 12:25:50 +010085 return std::make_unique<NeonWorkloadFactory>(
Jan Eilers3c9e0452020-04-10 13:00:44 +010086 PolymorphicPointerDowncast<NeonMemoryManager>(memoryManager));
Narumol Prangnawarat4e3e8182019-08-14 12:25:50 +010087}
88
Sadik Armagan04a72972020-09-14 15:44:18 +010089IBackendInternal::IWorkloadFactoryPtr NeonBackend::CreateWorkloadFactory(
90 TensorHandleFactoryRegistry& tensorHandleFactoryRegistry, const ModelOptions& modelOptions) const
91{
92 auto memoryManager = std::make_shared<NeonMemoryManager>(std::make_unique<arm_compute::Allocator>(),
93 BaseMemoryManager::MemoryAffinity::Offset);
94
95 tensorHandleFactoryRegistry.RegisterMemoryManager(memoryManager);
Narumol Prangnawarat77400452022-01-13 17:43:41 +000096
97 auto factory = std::make_unique<NeonTensorHandleFactory>(memoryManager);
98 // Register copy and import factory pair
99 tensorHandleFactoryRegistry.RegisterCopyAndImportFactoryPair(factory->GetId(), factory->GetId());
100 // Register the factory
101 tensorHandleFactoryRegistry.RegisterFactory(std::move(factory));
Sadik Armagan04a72972020-09-14 15:44:18 +0100102
103 return std::make_unique<NeonWorkloadFactory>(
104 PolymorphicPointerDowncast<NeonMemoryManager>(memoryManager), CreateBackendSpecificModelContext(modelOptions));
105}
106
David Beck263e3492018-11-09 14:46:40 +0000107IBackendInternal::IBackendContextPtr NeonBackend::CreateBackendContext(const IRuntime::CreationOptions&) const
108{
109 return IBackendContextPtr{};
110}
111
Colm Donelane49755b2020-01-29 15:22:43 +0000112IBackendInternal::IBackendProfilingContextPtr NeonBackend::CreateBackendProfilingContext(
Colm Donelan1aff3932020-02-05 17:48:59 +0000113 const IRuntime::CreationOptions&, IBackendProfilingPtr&)
Colm Donelane49755b2020-01-29 15:22:43 +0000114{
115 return IBackendProfilingContextPtr{};
116}
117
Sadik Armagan045f6be2020-09-10 13:37:32 +0100118IBackendInternal::IBackendSpecificModelContextPtr NeonBackend::CreateBackendSpecificModelContext(
119 const ModelOptions& modelOptions) const
120{
121 return IBackendSpecificModelContextPtr{new NeonBackendModelContext{modelOptions}};
122}
123
David Beck111b5d92018-11-12 14:59:37 +0000124IBackendInternal::ILayerSupportSharedPtr NeonBackend::GetLayerSupport() const
125{
Sadik Armagan045f6be2020-09-10 13:37:32 +0100126 static ILayerSupportSharedPtr layerSupport
127 {
128 new NeonLayerSupport(IBackendInternal::IBackendSpecificModelContextPtr{})
129 };
130 return layerSupport;
131}
132
133IBackendInternal::ILayerSupportSharedPtr NeonBackend::GetLayerSupport(const ModelOptions& modelOptions) const
134{
135 static ILayerSupportSharedPtr layerSupport
136 {
137 new NeonLayerSupport(CreateBackendSpecificModelContext(modelOptions))
138 };
David Beck111b5d92018-11-12 14:59:37 +0000139 return layerSupport;
140}
141
// Backend-specific optimisation pass over a subgraph. Two transformations:
//   1. Fuse a following Activation layer into a supported preceding layer
//      (conv / depthwise conv / fully-connected / batch-norm / elementwise
//      add, sub, mul, div) when the corresponding Neon workload validator
//      accepts the combined descriptor.
//   2. Split a Reduce layer with multiple axes into a chain of single-axis
//      Reduce layers.
// Layers not consumed by any substitution are reported back as untouched.
OptimizationViews NeonBackend::OptimizeSubgraphView(const SubgraphView& subgraph) const
{
    OptimizationViews optimizationViews;

    // First pass: record every layer of the subgraph as "untouched".
    // Entries are erased below as substitutions are made.
    auto it = subgraph.endIConnectable();
    std::map<LayerGuid, Layer*> untouched;

    while (it != subgraph.beginIConnectable())
    {
        --it;
        Layer& base = *(PolymorphicDowncast<Layer*>(*it));
        untouched.insert({base.GetGuid(), &base});
    }

    // Second pass: walk the subgraph backwards and attempt each optimisation.
    it = subgraph.endIConnectable();
    while (it != subgraph.beginIConnectable())
    {
        --it;
        Layer& base = *(PolymorphicDowncast<Layer*>(*it));

        // Fuse activation into previous layer if supported by backend.
        // Skipped when the base layer already carries fused-activation info.
        if ((base.GetType() == LayerType::DepthwiseConvolution2d || base.GetType() == LayerType::Convolution2d
            || base.GetType() == LayerType::BatchNormalization || base.GetType() == LayerType::FullyConnected
            || base.GetType() == LayerType::Addition || base.GetType() == LayerType::Multiplication
            || base.GetType() == LayerType::Subtraction || base.GetType() == LayerType::Division)
            && (base.GetAdditionalInformation<ActivationDescriptor>() == nullptr))
        {
            for (auto output = base.BeginOutputSlots(); output != base.EndOutputSlots(); ++output)
            {
                // Only fuse when the output feeds exactly one consumer, otherwise
                // other consumers would lose the pre-activation result.
                if (output->GetNumConnections() == 1)
                {
                    for (auto&& childInput : output->GetConnections())
                    {
                        if ((childInput->GetOwningLayer().GetType() == LayerType::Activation) &&
                            (checkDataTypeInputandOutput(childInput->GetOwningLayer())))
                        {
                            Layer& child = childInput->GetOwningLayer();

                            auto* activationLayer = PolymorphicDowncast<ActivationLayer*>(&child);

                            // Name of the fused replacement layer, e.g. "fused-relu-into-conv".
                            const std::string name = std::string("fused-") + child.GetName() + std::string("-into-") +
                                                     base.GetName();

                            // Get params from activation layer
                            ActivationDescriptor activationDesc = activationLayer->GetParameters();

                            if (base.GetType() == LayerType::Convolution2d)
                            {
                                Convolution2dLayer* baseLayer = PolymorphicDowncast<Convolution2dLayer*>(&base);

                                Optional<TensorInfo> biases;

                                if (baseLayer->GetParameters().m_BiasEnabled)
                                {
                                    biases = baseLayer->m_Bias->GetTensorInfo();
                                }

                                // Ask the Neon workload whether conv + this activation is supported.
                                arm_compute::Status status = NeonConvolution2dWorkloadValidate(
                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        baseLayer->GetParameters(),
                                        baseLayer->m_Weight->GetTensorInfo(),
                                        biases,
                                        false, // NOTE(review): presumably the fast-math flag — confirm against
                                               // NeonConvolution2dWorkloadValidate's signature
                                        &activationDesc);

                                if (status)
                                {
                                    FuseConvolution2dLayer<Convolution2dLayer>(optimizationViews,
                                                                               baseLayer,
                                                                               activationLayer,
                                                                               activationDesc,
                                                                               name);
                                    // Both layers are now replaced by the fused layer.
                                    untouched.erase(baseLayer->GetGuid());
                                    untouched.erase(activationLayer->GetGuid());
                                }
                            }
                            else if (base.GetType() == LayerType::DepthwiseConvolution2d)
                            {
                                DepthwiseConvolution2dLayer* baseLayer =
                                    PolymorphicDowncast<DepthwiseConvolution2dLayer*>(&base);

                                Optional<TensorInfo> biases;

                                if (baseLayer->GetParameters().m_BiasEnabled)
                                {
                                    biases = baseLayer->m_Bias->GetTensorInfo();
                                }

                                arm_compute::Status status = NeonDepthwiseConvolutionWorkloadValidate(
                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        baseLayer->GetParameters(),
                                        baseLayer->m_Weight->GetTensorInfo(),
                                        biases,
                                        &activationDesc);

                                if (status)
                                {
                                    FuseDepthwiseConvolution2dLayer<DepthwiseConvolution2dLayer>(optimizationViews,
                                                                                                 baseLayer,
                                                                                                 activationLayer,
                                                                                                 activationDesc,
                                                                                                 name);
                                    untouched.erase(baseLayer->GetGuid());
                                    untouched.erase(activationLayer->GetGuid());
                                }
                            }
                            else if (base.GetType() == LayerType::FullyConnected)
                            {
                                FullyConnectedLayer* baseLayer = PolymorphicDowncast<FullyConnectedLayer*>(&base);
                                Optional<TensorInfo> biases;

                                if (baseLayer->GetParameters().m_BiasEnabled)
                                {
                                    biases = baseLayer->m_Bias->GetTensorInfo();
                                }

                                arm_compute::Status status = NeonFullyConnectedWorkloadValidate(
                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        baseLayer->m_Weight->GetTensorInfo(),
                                        biases,
                                        baseLayer->GetParameters(),
                                        &activationDesc);

                                if (status)
                                {
                                    FuseFullyConnectedLayer<FullyConnectedLayer>(optimizationViews,
                                                                                 baseLayer,
                                                                                 activationLayer,
                                                                                 activationDesc,
                                                                                 name);
                                    untouched.erase(baseLayer->GetGuid());
                                    untouched.erase(activationLayer->GetGuid());
                                }
                            }
                            else if (base.GetType() == LayerType::BatchNormalization)
                            {
                                BatchNormalizationLayer* baseLayer =
                                    PolymorphicDowncast<BatchNormalizationLayer*>(&base);

                                arm_compute::Status status = NeonBatchNormalizationValidate(
                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        baseLayer->m_Mean->GetTensorInfo(),
                                        baseLayer->m_Variance->GetTensorInfo(),
                                        baseLayer->m_Beta->GetTensorInfo(),
                                        baseLayer->m_Gamma->GetTensorInfo(),
                                        baseLayer->GetParameters(),
                                        &activationDesc);

                                if (status)
                                {
                                    BatchNormalizationLayer* replacementLayer =
                                        FuseBatchNormalizationLayer<BatchNormalizationLayer>(optimizationViews,
                                                                                             baseLayer,
                                                                                             activationLayer,
                                                                                             activationDesc,
                                                                                             name);

                                    // Move the constant tensors onto the fused replacement layer;
                                    // the fusion helper does not transfer them itself.
                                    replacementLayer->m_Beta     = std::move(baseLayer->m_Beta);
                                    replacementLayer->m_Gamma    = std::move(baseLayer->m_Gamma);
                                    replacementLayer->m_Mean     = std::move(baseLayer->m_Mean);
                                    replacementLayer->m_Variance = std::move(baseLayer->m_Variance);
                                    untouched.erase(baseLayer->GetGuid());
                                    untouched.erase(activationLayer->GetGuid());
                                }
                            }
                            else if (base.GetType() == LayerType::Addition)
                            {
                                AdditionLayer* baseLayer = PolymorphicDowncast<AdditionLayer*>(&base);

                                arm_compute::Status status = NeonAdditionWorkloadValidate(
                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        &activationDesc);

                                if (status)
                                {
                                    FuseAdditionLayer<AdditionLayer>(optimizationViews,
                                                                     baseLayer,
                                                                     activationLayer,
                                                                     activationDesc,
                                                                     name);
                                    untouched.erase(baseLayer->GetGuid());
                                    untouched.erase(activationLayer->GetGuid());
                                }
                            }
                            else if (base.GetType() == LayerType::Division)
                            {
                                DivisionLayer* baseLayer = PolymorphicDowncast<DivisionLayer*>(&base);

                                arm_compute::Status status = NeonDivisionWorkloadValidate(
                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        &activationDesc);

                                if (status)
                                {
                                    FuseDivisionLayer<DivisionLayer>(optimizationViews,
                                                                     baseLayer,
                                                                     activationLayer,
                                                                     activationDesc,
                                                                     name);
                                    untouched.erase(baseLayer->GetGuid());
                                    untouched.erase(activationLayer->GetGuid());
                                }
                            }
                            else if (base.GetType() == LayerType::Multiplication)
                            {
                                MultiplicationLayer* baseLayer = PolymorphicDowncast<MultiplicationLayer*>(&base);

                                arm_compute::Status status = NeonMultiplicationWorkloadValidate(
                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        &activationDesc);

                                if (status)
                                {
                                    FuseMultiplicationLayer<MultiplicationLayer>(optimizationViews,
                                                                                 baseLayer,
                                                                                 activationLayer,
                                                                                 activationDesc,
                                                                                 name);
                                    untouched.erase(baseLayer->GetGuid());
                                    untouched.erase(activationLayer->GetGuid());
                                }
                            }
                            else if (base.GetType() == LayerType::Subtraction)
                            {
                                SubtractionLayer* baseLayer = PolymorphicDowncast<SubtractionLayer*>(&base);

                                arm_compute::Status status = NeonSubtractionWorkloadValidate(
                                        baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        baseLayer->GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo(),
                                        activationLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
                                        &activationDesc);

                                if (status)
                                {
                                    FuseSubtractionLayer<SubtractionLayer>(optimizationViews,
                                                                           baseLayer,
                                                                           activationLayer,
                                                                           activationDesc,
                                                                           name);
                                    untouched.erase(baseLayer->GetGuid());
                                    untouched.erase(activationLayer->GetGuid());
                                }
                            }
                        }
                    }
                }
            }
        }

        // Separate reduce layer with multiple axes into multiple reduce layers with 1 axis.
        if (base.GetType() == LayerType::Reduce)
        {
            ReduceLayer* baseLayer = PolymorphicDowncast<ReduceLayer*>(&base);
            ReduceDescriptor reduceDescriptor = baseLayer->GetParameters();

            if (!reduceDescriptor.m_vAxis.empty() && reduceDescriptor.m_vAxis.size() > 1)
            {
                // Add new layers to the graph and connect them.
                std::vector<IConnectableLayer*> layers = ChainReduceLayers<ReduceLayer>(optimizationViews,
                                                                                        baseLayer,
                                                                                        reduceDescriptor);

                // Replace existing baselayer with new subgraph.
                ReplaceLayers<ReduceLayer>(optimizationViews, baseLayer, layers);
                untouched.erase(baseLayer->GetGuid());
            }
        }
    }

    // No substitutions: hand the whole subgraph back unchanged. Otherwise
    // report the layers that were not part of any substitution.
    if (optimizationViews.GetSubstitutions().empty())
    {
        optimizationViews.AddUntouchedSubgraph(SubgraphView(subgraph));
    }
    else
    {
        ReportUntouchedLayers(optimizationViews, untouched);
    }

    return optimizationViews;
}
432
Narumol Prangnawarat4e3e8182019-08-14 12:25:50 +0100433std::vector<ITensorHandleFactory::FactoryId> NeonBackend::GetHandleFactoryPreferences() const
434{
Narumol Prangnawarat265e53e2020-10-30 16:06:55 +0000435 return std::vector<ITensorHandleFactory::FactoryId>() = { NeonTensorHandleFactory::GetIdStatic() };
Narumol Prangnawarat4e3e8182019-08-14 12:25:50 +0100436}
437
438void NeonBackend::RegisterTensorHandleFactories(class TensorHandleFactoryRegistry& registry)
439{
440 auto memoryManager = std::make_shared<NeonMemoryManager>(std::make_unique<arm_compute::Allocator>(),
441 BaseMemoryManager::MemoryAffinity::Offset);
442
443 registry.RegisterMemoryManager(memoryManager);
Narumol Prangnawarat77400452022-01-13 17:43:41 +0000444
445 auto factory = std::make_unique<NeonTensorHandleFactory>(memoryManager);
446 // Register copy and import factory pair
447 registry.RegisterCopyAndImportFactoryPair(factory->GetId(), factory->GetId());
448 // Register the factory
449 registry.RegisterFactory(std::move(factory));
Narumol Prangnawarat4e3e8182019-08-14 12:25:50 +0100450}
451
Francis Murtaghe8d7ccb2021-10-14 17:30:24 +0100452std::unique_ptr<ICustomAllocator> NeonBackend::GetDefaultAllocator() const
453{
454 return std::make_unique<DefaultAllocator>();
455}
456
457
Matthew Bentham42bad952018-12-17 09:23:36 +0000458} // namespace armnn