blob: c915555dd796f3365513f9fca1814395ffe8c8bf [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5
Nattapat Chaimanowong77140882018-10-17 11:12:19 +01006#include "NeonDepthwiseConvolutionWorkload.hpp"
7
Matthew Benthamd80a7122019-01-08 17:52:37 +00008#include "NeonWorkloadUtils.hpp"
9
10#include <DataLayoutIndexed.hpp>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000011#include <aclCommon/ArmComputeTensorUtils.hpp>
12#include <neon/NeonLayerSupport.hpp>
13#include <backendsCommon/CpuTensorHandle.hpp>
Matteo Martincigh747ef822018-12-18 09:26:39 +000014#include <backendsCommon/WorkloadUtils.hpp>
telsoa014fcda012018-03-09 14:13:49 +000015
Matthew Benthamd80a7122019-01-08 17:52:37 +000016#include <arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h>
17
18using namespace armnnUtils;
19
telsoa014fcda012018-03-09 14:13:49 +000020namespace armnn
21{
Nattapat Chaimanowong77140882018-10-17 11:12:19 +010022
telsoa014fcda012018-03-09 14:13:49 +000023using namespace armcomputetensorutils;
24
Nattapat Chaimanowong77140882018-10-17 11:12:19 +010025arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo& input,
Matteo Martincigh747ef822018-12-18 09:26:39 +000026 const TensorInfo& output,
27 const DepthwiseConvolution2dDescriptor& descriptor,
28 const TensorInfo& weights,
29 const Optional<TensorInfo>& biases)
Nattapat Chaimanowong77140882018-10-17 11:12:19 +010030{
Matteo Martincigh747ef822018-12-18 09:26:39 +000031 const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
32 const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
33
34 // ArmNN's weight format is [ M, I, H, W ]
35 const unsigned int aclDepthMultiplier = weights.GetShape()[0];
36
37 // Convert the weight format from ArmNN's [ M, I, H, W ] (does NOT depend on the data layout) to either
38 // [ 1, H, W, I * M ] (if NHWC) or [ 1, I * M, H, W ] (if NCHW), as required by the compute library
39 TensorInfo weightsPermuted = ConvertWeightTensorInfoFromArmnnToAcl(weights, descriptor.m_DataLayout);
40
41 // Convert the weights into the compute library format
42 const arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weightsPermuted, descriptor.m_DataLayout);
Nattapat Chaimanowong77140882018-10-17 11:12:19 +010043
44 arm_compute::TensorInfo aclBiasesInfo;
45 arm_compute::TensorInfo *optionalAclBiasesInfo = nullptr;
46
47 if (descriptor.m_BiasEnabled)
48 {
49 BOOST_ASSERT(biases.has_value());
50
51 aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
52 optionalAclBiasesInfo = &aclBiasesInfo;
53 }
54
Matteo Martincigh747ef822018-12-18 09:26:39 +000055 const arm_compute::PadStrideInfo aclPadStrideInfo = BuildArmComputePadStrideInfo(descriptor);
Nattapat Chaimanowong77140882018-10-17 11:12:19 +010056
57 return arm_compute::NEDepthwiseConvolutionLayer::validate(&aclInputInfo,
58 &aclWeightsInfo,
59 optionalAclBiasesInfo,
60 &aclOutputInfo,
61 aclPadStrideInfo,
62 aclDepthMultiplier);
63}
64
65NeonDepthwiseConvolutionWorkload::NeonDepthwiseConvolutionWorkload(
telsoa014fcda012018-03-09 14:13:49 +000066 const DepthwiseConvolution2dQueueDescriptor& descriptor,
67 const WorkloadInfo& info)
Nattapat Chaimanowong77140882018-10-17 11:12:19 +010068 : BaseWorkload<DepthwiseConvolution2dQueueDescriptor>(descriptor, info)
telsoa014fcda012018-03-09 14:13:49 +000069{
Matteo Martincigh747ef822018-12-18 09:26:39 +000070 // ArmNN's weight format is [ M, I, H, W ]
71 auto& weightInfo = m_Data.m_Weight->GetTensorInfo();
telsoa014fcda012018-03-09 14:13:49 +000072
Matteo Martincigh747ef822018-12-18 09:26:39 +000073 // Allocate a buffer for the swizzling of the weight tensor
74 std::unique_ptr<unsigned char[]> permuteBuffer(new unsigned char[m_Data.m_Weight->GetTensorInfo().GetNumBytes()]);
75
76 // Convert the weight format from ArmNN's [ M, I, H, W ] (does NOT depend on the data layout) to either
77 // [ 1, H, W, I * M ] (if NHWC) or [ 1, I * M, H, W ] (if NCHW), as required by the compute library
78 ConstTensor weightPermuted = ConvertWeightTensorFromArmnnToAcl(m_Data.m_Weight,
79 m_Data.m_Parameters.m_DataLayout,
80 permuteBuffer.get());
81
82 // Convert the weights into the compute library format
telsoa01c577f2c2018-08-31 09:22:23 +010083 m_KernelTensor = std::make_unique<arm_compute::Tensor>();
Matteo Martincigh747ef822018-12-18 09:26:39 +000084 BuildArmComputeTensor(*m_KernelTensor, weightPermuted.GetInfo(), m_Data.m_Parameters.m_DataLayout);
Mohamed Nour Abouelseoud7e7261e2018-11-27 17:35:35 +000085
telsoa014fcda012018-03-09 14:13:49 +000086 if (m_Data.m_Parameters.m_BiasEnabled)
87 {
telsoa01c577f2c2018-08-31 09:22:23 +010088 m_BiasTensor = std::make_unique<arm_compute::Tensor>();
Nikhil Rajcec6b652018-10-12 13:51:57 +010089 BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), m_Data.m_Parameters.m_DataLayout);
telsoa014fcda012018-03-09 14:13:49 +000090 }
91
92 arm_compute::PadStrideInfo padStrideInfo(m_Data.m_Parameters.m_StrideX,
93 m_Data.m_Parameters.m_StrideY,
94 m_Data.m_Parameters.m_PadLeft,
95 m_Data.m_Parameters.m_PadRight,
96 m_Data.m_Parameters.m_PadTop,
97 m_Data.m_Parameters.m_PadBottom,
98 arm_compute::DimensionRoundingType::FLOOR);
99
Nattapat Chaimanowong77140882018-10-17 11:12:19 +0100100 m_Data.ValidateInputsOutputs("NeonDepthwiseConvolutionWorkload", 1, 1);
telsoa014fcda012018-03-09 14:13:49 +0000101
Matteo Martincigh747ef822018-12-18 09:26:39 +0000102 INeonTensorHandle* inputTensorHandle = static_cast<INeonTensorHandle*>(m_Data.m_Inputs[0]);
103 INeonTensorHandle* outputTensorHandle = static_cast<INeonTensorHandle*>(m_Data.m_Outputs[0]);
104
Mohamed Nour Abouelseoud7e7261e2018-11-27 17:35:35 +0000105 arm_compute::ITensor& input = inputTensorHandle->GetTensor();
106 arm_compute::ITensor& output = outputTensorHandle->GetTensor();
telsoa014fcda012018-03-09 14:13:49 +0000107
Nikhil Rajcec6b652018-10-12 13:51:57 +0100108 arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
109 input.info()->set_data_layout(aclDataLayout);
110 output.info()->set_data_layout(aclDataLayout);
111
Matteo Martincigh747ef822018-12-18 09:26:39 +0000112 // Get the depth multiplier
113 const unsigned int depthMultiplier = weightInfo.GetShape()[0];
Mohamed Nour Abouelseoud7e7261e2018-11-27 17:35:35 +0000114
Matteo Martincigh747ef822018-12-18 09:26:39 +0000115 // Check for optimisation opportunities.
116 bool use3x3Optimisation = (weightInfo.GetShape()[2] == 3) && (weightInfo.GetShape()[3] == 3);
telsoa014fcda012018-03-09 14:13:49 +0000117 if (use3x3Optimisation)
118 {
119 m_pDepthwiseConvolutionLayer = std::make_unique<arm_compute::NEDepthwiseConvolutionLayer3x3>();
120 static_cast<arm_compute::NEDepthwiseConvolutionLayer3x3*>(
121 m_pDepthwiseConvolutionLayer.get())->configure(&input,
telsoa01c577f2c2018-08-31 09:22:23 +0100122 m_KernelTensor.get(),
123 m_BiasTensor.get(),
telsoa014fcda012018-03-09 14:13:49 +0000124 &output,
Matteo Martincigh747ef822018-12-18 09:26:39 +0000125 padStrideInfo,
126 depthMultiplier);
telsoa014fcda012018-03-09 14:13:49 +0000127 }
128 else
129 {
130 m_pDepthwiseConvolutionLayer = std::make_unique<arm_compute::NEDepthwiseConvolutionLayer>();
131 static_cast<arm_compute::NEDepthwiseConvolutionLayer*>(
132 m_pDepthwiseConvolutionLayer.get())->configure(&input,
telsoa01c577f2c2018-08-31 09:22:23 +0100133 m_KernelTensor.get(),
134 m_BiasTensor.get(),
telsoa014fcda012018-03-09 14:13:49 +0000135 &output,
Matteo Martincigh747ef822018-12-18 09:26:39 +0000136 padStrideInfo,
137 depthMultiplier);
telsoa014fcda012018-03-09 14:13:49 +0000138 }
139
140 BOOST_ASSERT(m_pDepthwiseConvolutionLayer);
141
Matteo Martincigh747ef822018-12-18 09:26:39 +0000142 ScopedCpuTensorHandle weightsPermutedHandle(weightPermuted);
143 InitializeArmComputeTensorData(*m_KernelTensor, &weightsPermutedHandle);
telsoa014fcda012018-03-09 14:13:49 +0000144
Mohamed Nour Abouelseoud7e7261e2018-11-27 17:35:35 +0000145 if (m_Data.m_Parameters.m_BiasEnabled)
telsoa014fcda012018-03-09 14:13:49 +0000146 {
Nattapat Chaimanowong177d8d22018-10-16 13:21:27 +0100147 InitializeArmComputeTensorData(*m_BiasTensor, m_Data.m_Bias);
telsoa014fcda012018-03-09 14:13:49 +0000148 }
telsoa01c577f2c2018-08-31 09:22:23 +0100149
150 m_pDepthwiseConvolutionLayer->prepare();
151 FreeUnusedTensors();
telsoa014fcda012018-03-09 14:13:49 +0000152}
153
/// Runs the depthwise convolution previously configured in the constructor.
/// The layer must have been created and prepared before this is called.
void NeonDepthwiseConvolutionWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonDepthwiseConvolutionWorkload_Execute");
    BOOST_ASSERT(m_pDepthwiseConvolutionLayer);

    m_pDepthwiseConvolutionLayer->run();
}
161
/// Releases the kernel and bias tensors if the compute library no longer
/// needs them (called after NEDepthwiseConvolutionLayer::prepare()).
void NeonDepthwiseConvolutionWorkload::FreeUnusedTensors()
{
    FreeTensorIfUnused(m_KernelTensor);
    FreeTensorIfUnused(m_BiasTensor);
}
167
telsoa014fcda012018-03-09 14:13:49 +0000168} //namespace armnn