//
// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "GpuFsaDepthwiseConvolution2d.hpp"

#include <armnn/Types.hpp>

#include <backendsCommon/WorkloadUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>

#include <arm_compute/core/ITensorInfo.h>
#include <arm_compute/core/TensorInfo.h>
#include <arm_compute/core/TensorShape.h>
#include <arm_compute/core/CL/CLKernelLibrary.h>
#include <arm_compute/core/CL/CLCompileContext.h>

#include <arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h>
#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h>
#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuDepthwiseConv2d.h>
#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h>

#include <vector>

namespace armnn
{

using namespace armcomputetensorutils;
31arm_compute::Status GpuFsaDepthwiseConvolution2dValidate(const TensorInfo& input,
32 const DepthwiseConvolution2dDescriptor& descriptor,
33 const TensorInfo& weights,
34 const Optional<TensorInfo>& biases)
35{
36 // Create a new workload sketch, for validation purposes
37 auto compileCtx = arm_compute::CLKernelLibrary::get().get_compile_context();
38 auto workloadContext = GpuWorkloadContext(&compileCtx);
39 GpuWorkloadSketch sketch{ &workloadContext };
40
41 // Build and create tensor infos using the sketch
42 const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
43
44 // ArmNN format for weights for depthwise is [1, H, W, C] independently of the input/output layout
45 //
46 // ACL format for weights for depthwise is:
47 // - [1, H, W, C] for [N, H, W, C] input/output layout (matches with ArmNN)
48 // - [1, C, H, W] for [N, C, H, W] input/output layout
49 //
50 // Therefore ArmNN weights have to be permuted when input/output layout is [N, C, H, W] to pass them to ACL.
51 // The PermuteDepthwiseConv2dWeights backend optimization takes care of this, but it has not been performed yet,
52 // so we do the permute here for the TensorInfo weights.
53 unsigned int aclDepthMultiplier;
54 TensorInfo weightsPermuted;
55 std::tie(weightsPermuted, aclDepthMultiplier) = Convert1HWOTensorInfoToAcl(weights, input,descriptor.m_DataLayout);
56 auto weightsShape = weightsPermuted.GetShape();
57 weightsPermuted.SetShape({weightsShape[1], weightsShape[2], weightsShape[3]});
58
59 arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weightsPermuted, descriptor.m_DataLayout);
60 aclWeightsInfo.set_are_values_constant(weights.IsConstant());
61
62 auto inputInfo = workloadContext.create_tensor_info(aclInputInfo);
63 auto weightInfo = workloadContext.create_tensor_info(aclWeightsInfo);
64
65 // Only create the bias tensor info if enabled, otherwise pass nullptr to validate_op
66 arm_compute::TensorInfo aclBiasInfo;
67 arm_compute::ITensorInfo* biasSketchInfoPtr = nullptr;
68
69 if (descriptor.m_BiasEnabled)
70 {
71 if(!biases.has_value())
72 {
73 throw InvalidArgumentException(
74 "GpuFsaDepthwiseConvolution2dValidate: No biases set when biases are enabled");
75 }
76 aclBiasInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
77 aclBiasInfo.set_are_values_constant(biases.value().IsConstant());
78
79 biasSketchInfoPtr = workloadContext.create_tensor_info(aclBiasInfo);
80 }
81
82 // Set DepthwiseConv2d attributes using descriptor
83 const arm_compute::Size2D aclDilationInfo = BuildArmComputeSize2D(descriptor.m_DilationX,
84 descriptor.m_DilationY);
85 const arm_compute::Padding2D aclPadInfo = BuildArmComputePaddingInfo(descriptor);
86 const arm_compute::Size2D aclStrideInfo = BuildArmComputeSize2D(descriptor.m_StrideX, descriptor.m_StrideY);
87
88 DepthwiseConv2dAttributes depthwiseConv2dAttributes{};
89 depthwiseConv2dAttributes.pad(aclPadInfo);
90 depthwiseConv2dAttributes.stride(aclStrideInfo);
91 depthwiseConv2dAttributes.dilation(aclDilationInfo);
92 depthwiseConv2dAttributes.depth_multiplier(aclDepthMultiplier);
93
94 // Validate operator, check status and update reasonIfUnsupported
95 arm_compute::Status aclStatus = GpuDepthwiseConv2d::validate_op(sketch,
96 inputInfo,
97 weightInfo,
98 biasSketchInfoPtr,
99 depthwiseConv2dAttributes);
100
101 return aclStatus;
102}
104void GpuFsaDepthwiseConvolution2dCreateOp(GpuFsaPreCompiledBlob* blob,
105 const TensorInfo& input,
106 const DepthwiseConvolution2dDescriptor& descriptor,
107 const TensorInfo& weights,
108 const Optional<TensorInfo>& biases)
109{
110/*
111* Creating an Op for the GpuFsa backend requires us to create and maintain quite a bit of data, which is then stored
112* in a GpuFsaPreCompiledBlob for execution later. Specifically we need:
113* GpuWorkloadContext, this contains the TensorInfos and is unique to the Graph being executed
114* Sketch, this is similar to a subgraph and can contain one or more operations. Multiple ops can be "fused" together
115* using a single sketch.
116* The inputTensorinfos / outputTensorInfos, these are pointers to the TensorInfos used when creating the sketch.
117* They refer to the TensorInfos stored within the GpuWorkloadContext and are needed when executing the sketch
118* as the TensorInfos used when creating the Tensors must match those used to create the Sketch. Otherwise the runtime
119* doesn't know which Tensors to use.
120*/
121 using namespace arm_compute::experimental::dynamic_fusion;
122 GpuWorkloadSketch* sketch = blob->sketch.get();
123 GpuWorkloadContext* workloadContext = blob->workloadContext.get();
124 std::vector<arm_compute::ITensorInfo*> inputTensorInfos = {};
125 std::vector<arm_compute::ITensorInfo*> outputTensorInfos = {};
126
127 // Build and create tensor infos using the sketch
128 const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
129
130 // ArmNN format for weights for depthwise is [1, H, W, C] independently of the input/output layout
131 //
132 // ACL format for weights for depthwise is:
133 // - [1, H, W, C] for [N, H, W, C] input/output layout (matches with ArmNN)
134 // - [1, C, H, W] for [N, C, H, W] input/output layout
135 //
136 // Therefore ArmNN weights have to be permuted when input/output layout is [N, C, H, W] to pass them to ACL.
137 // The PermuteDepthwiseConv2dWeights backend optimization takes care of this, but it has not been performed yet,
138 // so we do the permute here for the TensorInfo weights.
139 unsigned int aclDepthMultiplier;
140 TensorInfo weightsPermuted;
141 std::tie(weightsPermuted, aclDepthMultiplier) = Convert1HWOTensorInfoToAcl(weights, input,descriptor.m_DataLayout);
142 auto weightsShape = weightsPermuted.GetShape();
143 weightsPermuted.SetShape({weightsShape[1], weightsShape[2], weightsShape[3]});
144
145 arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weightsPermuted, descriptor.m_DataLayout);
146 aclWeightsInfo.set_are_values_constant(weights.IsConstant());
147
148 inputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclInputInfo));
149 inputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclWeightsInfo));
150
151 // Only create the bias tensor info if enabled, otherwise pass nullptr to validate_op
152 arm_compute::TensorInfo aclBiasInfo;
153 arm_compute::ITensorInfo* biasSketchInfoPtr = nullptr;
154
155 if (descriptor.m_BiasEnabled)
156 {
157 if(!biases.has_value())
158 {
159 throw InvalidArgumentException("GpuFsaConvolution2dValidate: No biases set when biases are enabled");
160 }
161 aclBiasInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
162 aclBiasInfo.set_are_values_constant(biases.value().IsConstant());
163
164 inputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclBiasInfo));
165 biasSketchInfoPtr = inputTensorInfos[2];
166 }
167
168 // Set DepthwiseConv2d attributes using descriptor
169 const arm_compute::Size2D aclDilationInfo = BuildArmComputeSize2D(descriptor.m_DilationX,
170 descriptor.m_DilationY);
171 const arm_compute::Padding2D aclPadInfo = BuildArmComputePaddingInfo(descriptor);
172 const arm_compute::Size2D aclStrideInfo = BuildArmComputeSize2D(descriptor.m_StrideX, descriptor.m_StrideY);
173
174 DepthwiseConv2dAttributes depthwiseConv2dAttributes{};
175 depthwiseConv2dAttributes.pad(aclPadInfo);
176 depthwiseConv2dAttributes.stride(aclStrideInfo);
177 depthwiseConv2dAttributes.dilation(aclDilationInfo);
178 depthwiseConv2dAttributes.depth_multiplier(aclDepthMultiplier);
179
180 // Validate operator, check status and update reasonIfUnsupported
181 arm_compute::Status aclStatus = GpuDepthwiseConv2d::validate_op(*sketch,
182 inputTensorInfos[0],
183 inputTensorInfos[1],
184 biasSketchInfoPtr,
185 depthwiseConv2dAttributes);
186
187 const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
188 if (!supported)
189 {
190 throw BackendCapabilityException(
191 "\"GpuFsa\" backend failed during DepthwiseConvolution2D operation validation");
192 }
193
194 // Create the Op within the Sketch using the TensorInfos we have stored
195 arm_compute::ITensorInfo* convOutInfo = GpuDepthwiseConv2d::create_op(*sketch,
196 inputTensorInfos[0],
197 inputTensorInfos[1],
198 biasSketchInfoPtr,
199 depthwiseConv2dAttributes);
200
201 outputTensorInfos.emplace_back(workloadContext->create_tensor_info());
202 GpuOutput::create_op(*sketch, convOutInfo, outputTensorInfos[0]);
203
204 // Store the TensorInfos within the blob as unique_ptrs to be used later
205 blob->inputTensorInfos = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(inputTensorInfos);
206 blob->outputTensorInfos = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(outputTensorInfos);
207}

} // namespace armnn