//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonDepthwiseConvolutionWorkload.hpp"

#include "NeonWorkloadUtils.hpp"

#include <DataLayoutIndexed.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <neon/NeonLayerSupport.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/WorkloadUtils.hpp>

#include <arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h>

using namespace armnnUtils;

namespace armnn
{

using namespace armcomputetensorutils;

arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo& input,
                                                             const TensorInfo& output,
                                                             const DepthwiseConvolution2dDescriptor& descriptor,
                                                             const TensorInfo& weights,
                                                             const Optional<TensorInfo>& biases)
{
    const arm_compute::TensorInfo aclInputInfo  = BuildArmComputeTensorInfo(input,  descriptor.m_DataLayout);
    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);

    // ArmNN's weight format is [ M, I, H, W ]
    const unsigned int aclDepthMultiplier = weights.GetShape()[0];

    // Convert the weight format from ArmNN's [ M, I, H, W ] (does NOT depend on the data layout) to either
    // [ 1, H, W, I * M ] (if NHWC) or [ 1, I * M, H, W ] (if NCHW), as required by the compute library
    TensorInfo weightsPermuted = ConvertWeightTensorInfoFromArmnnToAcl(weights, descriptor.m_DataLayout);

    // Convert the weights into the compute library format
    const arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weightsPermuted, descriptor.m_DataLayout);

    arm_compute::TensorInfo aclBiasesInfo;
    arm_compute::TensorInfo* optionalAclBiasesInfo = nullptr;

    if (descriptor.m_BiasEnabled)
    {
        BOOST_ASSERT(biases.has_value());

        aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
        optionalAclBiasesInfo = &aclBiasesInfo;
    }

    arm_compute::PadStrideInfo aclPadStrideInfo = BuildArmComputePadStrideInfo(descriptor);
    const arm_compute::Size2D aclDilationInfo = BuildArmComputeSize2D(
        descriptor.m_DilationX, descriptor.m_DilationY);

    return arm_compute::NEDepthwiseConvolutionLayer::validate(&aclInputInfo,
                                                              &aclWeightsInfo,
                                                              optionalAclBiasesInfo,
                                                              &aclOutputInfo,
                                                              aclPadStrideInfo,
                                                              aclDepthMultiplier,
                                                              arm_compute::ActivationLayerInfo(),
                                                              aclDilationInfo);
}

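// The constructor below performs the same weight/bias conversions as the validate function above,
// then configures the underlying ACL layer once so that Execute() only has to call run().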
NeonDepthwiseConvolutionWorkload::NeonDepthwiseConvolutionWorkload(
    const DepthwiseConvolution2dQueueDescriptor& descriptor,
    const WorkloadInfo& info)
    : BaseWorkload<DepthwiseConvolution2dQueueDescriptor>(descriptor, info)
{
    // ArmNN's weight format is [ M, I, H, W ]
    auto& weightInfo = m_Data.m_Weight->GetTensorInfo();

    // Allocate a buffer for the swizzling of the weight tensor
    std::unique_ptr<unsigned char[]> permuteBuffer(new unsigned char[m_Data.m_Weight->GetTensorInfo().GetNumBytes()]);

    // Convert the weight format from ArmNN's [ M, I, H, W ] (does NOT depend on the data layout) to either
    // [ 1, H, W, I * M ] (if NHWC) or [ 1, I * M, H, W ] (if NCHW), as required by the compute library
    ConstTensor weightPermuted = ConvertWeightTensorFromArmnnToAcl(m_Data.m_Weight,
                                                                   m_Data.m_Parameters.m_DataLayout,
                                                                   permuteBuffer.get());

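    // Note: BuildArmComputeTensor below only sets up the ACL tensor's metadata; the permuted weight
    // data itself is copied in further down via InitializeArmComputeTensorData, after configuration.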
    // Convert the weights into the compute library format
    m_KernelTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_KernelTensor, weightPermuted.GetInfo(), m_Data.m_Parameters.m_DataLayout);

    if (m_Data.m_Parameters.m_BiasEnabled)
    {
        m_BiasTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), m_Data.m_Parameters.m_DataLayout);
    }

    const arm_compute::Size2D aclDilationInfo = BuildArmComputeSize2D(
        m_Data.m_Parameters.m_DilationX, m_Data.m_Parameters.m_DilationY);

    m_Data.ValidateInputsOutputs("NeonDepthwiseConvolutionWorkload", 1, 1);

    IAclTensorHandle* inputTensorHandle  = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[0]);
    IAclTensorHandle* outputTensorHandle = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[0]);

    arm_compute::ITensor& input  = inputTensorHandle->GetTensor();
    arm_compute::ITensor& output = outputTensorHandle->GetTensor();

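    // Propagate ArmNN's data layout (NCHW or NHWC) onto the ACL tensor infos so the layer is
    // configured for the correct format.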
    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
    input.info()->set_data_layout(aclDataLayout);
    output.info()->set_data_layout(aclDataLayout);

    // Get the depth multiplier
    const unsigned int depthMultiplier = weightInfo.GetShape()[0];

    arm_compute::PadStrideInfo padStrideInfo = BuildArmComputePadStrideInfo(m_Data.m_Parameters);

    // Check for optimisation opportunities.
    const bool use3x3Optimisation = (weightInfo.GetShape()[2] == 3) && (weightInfo.GetShape()[3] == 3);
    const bool use5x5Optimisation = (weightInfo.GetShape()[2] == 5) && (weightInfo.GetShape()[3] == 5);

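    // ACL provides an optimised depthwise implementation for 3x3 and 5x5 kernels; use it where
    // possible and fall back to the generic NEDepthwiseConvolutionLayer otherwise.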
    if (use3x3Optimisation || use5x5Optimisation)
    {
        m_pDepthwiseConvolutionLayer = std::make_unique<arm_compute::NEDepthwiseConvolutionLayerOptimized>();
        static_cast<arm_compute::NEDepthwiseConvolutionLayerOptimized*>(
            m_pDepthwiseConvolutionLayer.get())->configure(&input,
                                                           m_KernelTensor.get(),
                                                           m_BiasTensor.get(),
                                                           &output,
                                                           padStrideInfo,
                                                           depthMultiplier,
                                                           arm_compute::ActivationLayerInfo(),
                                                           aclDilationInfo);
    }
    else
    {
        m_pDepthwiseConvolutionLayer = std::make_unique<arm_compute::NEDepthwiseConvolutionLayer>();
        static_cast<arm_compute::NEDepthwiseConvolutionLayer*>(
            m_pDepthwiseConvolutionLayer.get())->configure(&input,
                                                           m_KernelTensor.get(),
                                                           m_BiasTensor.get(),
                                                           &output,
                                                           padStrideInfo,
                                                           depthMultiplier,
                                                           arm_compute::ActivationLayerInfo(),
                                                           aclDilationInfo);
    }

    BOOST_ASSERT(m_pDepthwiseConvolutionLayer);

    ScopedCpuTensorHandle weightsPermutedHandle(weightPermuted);
    InitializeArmComputeTensorData(*m_KernelTensor, &weightsPermutedHandle);

    if (m_Data.m_Parameters.m_BiasEnabled)
    {
        InitializeArmComputeTensorData(*m_BiasTensor, m_Data.m_Bias);
    }

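    // prepare() lets ACL do its one-off work (such as internal weight reshaping) up front, after
    // which the staging kernel/bias tensors are no longer needed and can be released.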
    m_pDepthwiseConvolutionLayer->prepare();
    FreeUnusedTensors();
}

void NeonDepthwiseConvolutionWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonDepthwiseConvolutionWorkload_Execute");
    BOOST_ASSERT(m_pDepthwiseConvolutionLayer);

    m_pDepthwiseConvolutionLayer->run();
}

void NeonDepthwiseConvolutionWorkload::FreeUnusedTensors()
{
    FreeTensorIfUnused(m_KernelTensor);
    FreeTensorIfUnused(m_BiasTensor);
}

} // namespace armnn