//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonFullyConnectedFloatWorkload.hpp"

#include <backends/aclCommon/ArmComputeTensorUtils.hpp>
#include <backends/aclCommon/ArmComputeUtils.hpp>
#include <backends/CpuTensorHandle.hpp>

namespace armnn
{
using namespace armcomputetensorutils;

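// Checks, without configuring or allocating anything, whether the Compute Library's
// NEFullyConnectedLayer supports the given tensor shapes and descriptor settings.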
arm_compute::Status NeonFullyConnectedWorkloadValidate(const TensorInfo& input,
                                                       const TensorInfo& output,
                                                       const TensorInfo& weights,
                                                       const TensorInfo& biases,
                                                       const FullyConnectedDescriptor& descriptor)
{
    const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input);
    const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output);
    const arm_compute::TensorInfo aclWeights = BuildArmComputeTensorInfo(weights);

    arm_compute::TensorInfo aclBiases;
    arm_compute::TensorInfo* optionalAclBiases = nullptr;
    if (descriptor.m_BiasEnabled)
    {
        aclBiases = BuildArmComputeTensorInfo(biases);
        optionalAclBiases = &aclBiases;
    }

    const arm_compute::FullyConnectedLayerInfo fullyConnectedLayerInfo =
        ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(descriptor);

    return arm_compute::NEFullyConnectedLayer::validate(&aclInput,
                                                        &aclWeights,
                                                        optionalAclBiases,
                                                        &aclOutput,
                                                        fullyConnectedLayerInfo);
}

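// Sets up the Neon fully connected workload: builds ACL tensors for the constant weights and
// biases, configures the underlying NEFullyConnectedLayer and uploads the constant data.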
NeonFullyConnectedFloatWorkload::NeonFullyConnectedFloatWorkload(const FullyConnectedQueueDescriptor& descriptor,
    const WorkloadInfo& info, std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
    : FloatWorkload<FullyConnectedQueueDescriptor>(descriptor, info)
    , m_FullyConnectedLayer(memoryManager)
{
    m_Data.ValidateInputsOutputs("NeonFullyConnectedFloatWorkload", 1, 1);

    arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

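    // Create ACL tensors to hold the constant weights and, if enabled, the biases.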
    m_WeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_WeightsTensor, m_Data.m_Weight->GetTensorInfo());

    if (m_Data.m_Parameters.m_BiasEnabled)
    {
        m_BiasesTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_BiasesTensor, m_Data.m_Bias->GetTensorInfo());
    }

    // Construct
    arm_compute::FullyConnectedLayerInfo fc_info;
    fc_info.transpose_weights = m_Data.m_Parameters.m_TransposeWeightMatrix;
    m_FullyConnectedLayer.configure(&input, m_WeightsTensor.get(), m_BiasesTensor.get(), &output, fc_info);

    // Allocate
    InitializeArmComputeTensorDataForFloatTypes(*m_WeightsTensor, m_Data.m_Weight);

    if (m_BiasesTensor)
    {
        InitializeArmComputeTensorDataForFloatTypes(*m_BiasesTensor, m_Data.m_Bias);
    }

    // Force the Compute Library to perform the necessary copying and reshaping now; afterwards
    // the staged input tensors are no longer needed and can be freed.
    m_FullyConnectedLayer.prepare();
    FreeUnusedTensors();
}

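// Runs the configured fully connected layer on the Neon (CPU) backend.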
void NeonFullyConnectedFloatWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonFullyConnectedFloatWorkload_Execute");
    m_FullyConnectedLayer.run();
}

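// Releases the staged weight and bias tensors; after prepare() the layer holds its own
// reshaped copies, so the originals are no longer required.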
void NeonFullyConnectedFloatWorkload::FreeUnusedTensors()
{
    FreeTensorIfUnused(m_WeightsTensor);
    FreeTensorIfUnused(m_BiasesTensor);
}

} //namespace armnn