//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonDepthwiseConvolutionWorkload.hpp"

#include "NeonWorkloadUtils.hpp"

#include <armnnUtils/DataLayoutIndexed.hpp>

#include <aclCommon/ArmComputeTensorUtils.hpp>

#include <neon/NeonLayerSupport.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/WorkloadUtils.hpp>

#include <arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h>

using namespace armnnUtils;

namespace armnn
{

using namespace armcomputetensorutils;

arm_compute::Status NeonDepthwiseConvolutionWorkloadValidate(const TensorInfo& input,
                                                             const TensorInfo& output,
                                                             const DepthwiseConvolution2dDescriptor& descriptor,
                                                             const TensorInfo& weights,
                                                             const Optional<TensorInfo>& biases)
{
    const arm_compute::TensorInfo aclInputInfo  = BuildArmComputeTensorInfo(input,  descriptor.m_DataLayout);
    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);

    // ArmNN's weight format is [ M, I, H, W ]
    const unsigned int aclDepthMultiplier = weights.GetShape()[0];

    // Convert the weight format from ArmNN's [ M, I, H, W ] (does NOT depend on the data layout) to either
    // [ 1, H, W, I * M ] (if NHWC) or [ 1, I * M, H, W ] (if NCHW), as required by the compute library
    TensorInfo weightsPermuted = ConvertWeightTensorInfoFromArmnnToAcl(weights, descriptor.m_DataLayout);

    // Convert the weights into the compute library format
    const arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weightsPermuted, descriptor.m_DataLayout);

    arm_compute::TensorInfo aclBiasesInfo;
    arm_compute::TensorInfo* optionalAclBiasesInfo = nullptr;

    if (descriptor.m_BiasEnabled)
    {
        BOOST_ASSERT(biases.has_value());

        aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
        optionalAclBiasesInfo = &aclBiasesInfo;
    }

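    // Translate ArmNN's padding/stride and dilation settings into their Compute Library equivalents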
    arm_compute::PadStrideInfo aclPadStrideInfo = BuildArmComputePadStrideInfo(descriptor);
    const arm_compute::Size2D aclDilationInfo = BuildArmComputeSize2D(
        descriptor.m_DilationX, descriptor.m_DilationY);

    return arm_compute::NEDepthwiseConvolutionLayer::validate(&aclInputInfo,
                                                              &aclWeightsInfo,
                                                              optionalAclBiasesInfo,
                                                              &aclOutputInfo,
                                                              aclPadStrideInfo,
                                                              aclDepthMultiplier,
                                                              arm_compute::ActivationLayerInfo(),
                                                              aclDilationInfo);
}

NeonDepthwiseConvolutionWorkload::NeonDepthwiseConvolutionWorkload(
    const DepthwiseConvolution2dQueueDescriptor& descriptor,
    const WorkloadInfo& info)
    : BaseWorkload<DepthwiseConvolution2dQueueDescriptor>(descriptor, info)
{
    // ArmNN's weight format is [ M, I, H, W ]
    auto& weightInfo = m_Data.m_Weight->GetTensorInfo();

    // Allocate a buffer for the swizzling of the weight tensor
    std::unique_ptr<unsigned char[]> permuteBuffer(new unsigned char[m_Data.m_Weight->GetTensorInfo().GetNumBytes()]);

    // Convert the weight format from ArmNN's [ M, I, H, W ] (does NOT depend on the data layout) to either
    // [ 1, H, W, I * M ] (if NHWC) or [ 1, I * M, H, W ] (if NCHW), as required by the compute library
    ConstTensor weightPermuted = ConvertWeightTensorFromArmnnToAcl(m_Data.m_Weight,
                                                                   m_Data.m_Parameters.m_DataLayout,
                                                                   permuteBuffer.get());

    // Convert the weights into the compute library format
    m_KernelTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_KernelTensor, weightPermuted.GetInfo(), m_Data.m_Parameters.m_DataLayout);

    if (m_Data.m_Parameters.m_BiasEnabled)
    {
        m_BiasTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), m_Data.m_Parameters.m_DataLayout);
    }

    const arm_compute::Size2D aclDilationInfo = BuildArmComputeSize2D(
        m_Data.m_Parameters.m_DilationX, m_Data.m_Parameters.m_DilationY);

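    // The depthwise convolution workload expects exactly one input and one output tensor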
    m_Data.ValidateInputsOutputs("NeonDepthwiseConvolutionWorkload", 1, 1);

    IAclTensorHandle* inputTensorHandle  = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[0]);
    IAclTensorHandle* outputTensorHandle = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[0]);

    arm_compute::ITensor& input  = inputTensorHandle->GetTensor();
    arm_compute::ITensor& output = outputTensorHandle->GetTensor();

    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
    input.info()->set_data_layout(aclDataLayout);
    output.info()->set_data_layout(aclDataLayout);

    // Get the depth multiplier
    const unsigned int depthMultiplier = weightInfo.GetShape()[0];

    arm_compute::PadStrideInfo padStrideInfo = BuildArmComputePadStrideInfo(m_Data.m_Parameters);

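    // Create and configure the Compute Library depthwise convolution layer;
    // m_BiasTensor.get() returns nullptr when no bias is enabled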
    m_pDepthwiseConvolutionLayer = std::make_unique<arm_compute::NEDepthwiseConvolutionLayer>();
    static_cast<arm_compute::NEDepthwiseConvolutionLayer*>(
        m_pDepthwiseConvolutionLayer.get())->configure(&input,
                                                       m_KernelTensor.get(),
                                                       m_BiasTensor.get(),
                                                       &output,
                                                       padStrideInfo,
                                                       depthMultiplier,
                                                       arm_compute::ActivationLayerInfo(),
                                                       aclDilationInfo);

    BOOST_ASSERT(m_pDepthwiseConvolutionLayer);

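    // Copy the permuted weight data into the Compute Library kernel tensor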
    ScopedCpuTensorHandle weightsPermutedHandle(weightPermuted);
    InitializeArmComputeTensorData(*m_KernelTensor, &weightsPermutedHandle);

    if (m_Data.m_Parameters.m_BiasEnabled)
    {
        InitializeArmComputeTensorData(*m_BiasTensor, m_Data.m_Bias);
    }

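    // Let the Compute Library perform its one-off preparation; afterwards the staging kernel and bias
    // tensors can be released if the configured layer no longer references them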
    m_pDepthwiseConvolutionLayer->prepare();
    FreeUnusedTensors();
}

void NeonDepthwiseConvolutionWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonDepthwiseConvolutionWorkload_Execute");
    BOOST_ASSERT(m_pDepthwiseConvolutionLayer);

    m_pDepthwiseConvolutionLayer->run();
}

void NeonDepthwiseConvolutionWorkload::FreeUnusedTensors()
{
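    // Free the kernel and bias tensors only if nothing still uses them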
    FreeTensorIfUnused(m_KernelTensor);
    FreeTensorIfUnused(m_BiasTensor);
}

} //namespace armnn