blob: 7b52f2784f296e3e3582cebbe5a87b0a9ba73ac6 [file] [log] [blame]
Matthew Benthamd8067922018-10-03 17:18:04 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include "ClConvolution2dWorkload.hpp"
7
8#include "ClWorkloadUtils.hpp"
9
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000010#include <cl/ClLayerSupport.hpp>
11#include <cl/ClTensorHandle.hpp>
12#include <cl/ClLayerSupport.hpp>
13#include <aclCommon/ArmComputeUtils.hpp>
14#include <aclCommon/ArmComputeTensorUtils.hpp>
15#include <backendsCommon/CpuTensorHandle.hpp>
Matthew Benthamd8067922018-10-03 17:18:04 +010016
17#include <arm_compute/runtime/CL/functions/CLConvolutionLayer.h>
18
19namespace armnn
20{
21using namespace armcomputetensorutils;
22
23arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo& input,
24 const TensorInfo& output,
25 const Convolution2dDescriptor& descriptor,
26 const TensorInfo& weights,
Sadik Armagan045f6be2020-09-10 13:37:32 +010027 const Optional<TensorInfo>& biases,
28 bool isFastMathEnabled)
Matthew Benthamd8067922018-10-03 17:18:04 +010029{
30 const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
31 const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
32 const arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weights, descriptor.m_DataLayout);
33
Jan Eilers4b961d32019-07-11 09:19:35 +010034 const arm_compute::Size2D aclDilationInfo = BuildArmComputeSize2D(descriptor.m_DilationX,
35 descriptor.m_DilationY);
36
Matthew Benthamd8067922018-10-03 17:18:04 +010037 arm_compute::TensorInfo aclBiasesInfo;
38 arm_compute::TensorInfo *optionalAclBiasesInfo = nullptr;
39
40 if (descriptor.m_BiasEnabled)
41 {
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +010042 ARMNN_ASSERT(biases.has_value());
Matthew Benthamd8067922018-10-03 17:18:04 +010043
David Beck5eec11d2018-10-04 15:43:17 +010044 aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
Matthew Benthamd8067922018-10-03 17:18:04 +010045 optionalAclBiasesInfo = &aclBiasesInfo;
46 }
47
48 arm_compute::PadStrideInfo layerInfo = BuildArmComputePadStrideInfo(descriptor);
49
50 return arm_compute::CLConvolutionLayer::validate(&aclInputInfo,
51 &aclWeightsInfo,
52 optionalAclBiasesInfo,
53 &aclOutputInfo,
Jan Eilers4b961d32019-07-11 09:19:35 +010054 layerInfo,
55 arm_compute::WeightsInfo(),
Sadik Armagan045f6be2020-09-10 13:37:32 +010056 aclDilationInfo,
57 arm_compute::ActivationLayerInfo(),
58 isFastMathEnabled);
Matthew Benthamd8067922018-10-03 17:18:04 +010059}
60
/// Constructs the CL convolution workload: builds ACL-side tensors for the
/// weights (and optional bias), configures the CLConvolutionLayer against the
/// input/output tensor handles, records which convolution algorithm ACL chose,
/// uploads the constant weight/bias data, then prepares the layer and frees
/// the staging tensors that are no longer needed.
ClConvolution2dWorkload::ClConvolution2dWorkload(const Convolution2dQueueDescriptor& descriptor,
                                                 const WorkloadInfo& info,
                                                 std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager,
                                                 const bool isFastMathEnabled)
    : BaseWorkload<Convolution2dQueueDescriptor>(descriptor, info)
    , m_ConvolutionLayer(memoryManager)
{
    // todo: check tensor shapes match.
    const TensorInfo& weightInfo = m_Data.m_Weight->GetTensorInfo();

    // ACL-side tensor that will hold a copy of the weights (data uploaded below).
    m_KernelTensor = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_KernelTensor, weightInfo, m_Data.m_Parameters.m_DataLayout);

    const arm_compute::Size2D aclDilationInfo = BuildArmComputeSize2D(m_Data.m_Parameters.m_DilationX,
                                                                      m_Data.m_Parameters.m_DilationY);

    // Bias is optional; m_BiasTensor stays null when the layer has no bias,
    // and ACL's configure() accepts a null bias pointer in that case.
    if (m_Data.m_Parameters.m_BiasEnabled)
    {
        m_BiasTensor = std::make_unique<arm_compute::CLTensor>();
        BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), m_Data.m_Parameters.m_DataLayout);
    }

    // Convolution has exactly one input and one output tensor.
    m_Data.ValidateInputsOutputs("ClConvolution2dWorkload", 1, 1);

    arm_compute::ICLTensor& input  = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

    // Propagate the armnn data layout (NCHW/NHWC) onto the ACL tensor infos.
    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
    input.info()->set_data_layout(aclDataLayout);
    output.info()->set_data_layout(aclDataLayout);

    arm_compute::PadStrideInfo padStrideInfo = BuildArmComputePadStrideInfo(m_Data.m_Parameters);

    m_ConvolutionLayer.configure(&input,
                                 m_KernelTensor.get(),
                                 m_BiasTensor.get(),
                                 &output,
                                 padStrideInfo,
                                 arm_compute::WeightsInfo(),
                                 aclDilationInfo,
                                 arm_compute::ActivationLayerInfo(),
                                 isFastMathEnabled);

    // Cache the algorithm ACL selected for this configuration so callers can
    // query it via GetConvolutionMethod() after construction.
    m_ConvolutionMethod =
        m_ConvolutionLayer.get_convolution_method(input.info(),
                                                  m_KernelTensor->info(),
                                                  output.info(),
                                                  padStrideInfo,
                                                  arm_compute::WeightsInfo(),
                                                  arm_compute::ActivationLayerInfo(),
                                                  arm_compute::CLScheduler::get().target(),
                                                  aclDilationInfo,
                                                  isFastMathEnabled);

    // Copy the constant weight (and bias) data into the ACL tensors.
    InitializeArmComputeClTensorData(*m_KernelTensor, m_Data.m_Weight);

    if (m_BiasTensor)
    {
        InitializeArmComputeClTensorData(*m_BiasTensor, m_Data.m_Bias);
    }

    // Force Compute Library to perform the necessary copying and reshaping, after which
    // delete all the input tensors that will no longer be needed
    m_ConvolutionLayer.prepare();
    FreeUnusedTensors();
}
127
/// Enqueues the configured convolution on the CL command queue.
void ClConvolution2dWorkload::Execute() const
{
    // Emits a profiling event so this workload shows up in CL profiling traces.
    ARMNN_SCOPED_PROFILING_EVENT_CL("ClConvolution2dWorkload_Execute");
    // RunClFunction (ClWorkloadUtils) runs the ACL function; presumably it also
    // maps CL errors to armnn exceptions tagged with this location — see helper.
    RunClFunction(m_ConvolutionLayer, CHECK_LOCATION());
}
133
/// @return The convolution algorithm Compute Library selected for this
///         workload's configuration (cached by the constructor).
arm_compute::ConvolutionMethod ClConvolution2dWorkload::GetConvolutionMethod() const
{
    return m_ConvolutionMethod;
}
138
Matthew Benthamd8067922018-10-03 17:18:04 +0100139void ClConvolution2dWorkload::FreeUnusedTensors()
140{
141 FreeTensorIfUnused(m_KernelTensor);
142 FreeTensorIfUnused(m_BiasTensor);
143}
144
145} //namespace armnn