/*
 * Copyright (c) 2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuConv2d.h"

#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"

#include "src/core/helpers/AutoConfiguration.h"
#include "src/dynamic_fusion/sketch/ArgumentPack.h"
#include "src/dynamic_fusion/sketch/gpu/GpuWorkloadSketchImpl.h"
#include "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentDirectConv2d.h"
#include "src/gpu/cl/kernels/gemm/ClGemmHelpers.h"

#include "src/common/utils/Log.h"

namespace arm_compute
{
namespace experimental
{
namespace dynamic_fusion
{
namespace
{
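// Returns true if the given tensor can be exported to an OpenCL image (cl_image)
// on the given device and target, i.e. if every requirement checked below is met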
bool export_to_cl_image_support(const ITensorInfo *tensor, GPUTarget gpu_target, const cl::Device &device, DataLayout data_layout)
{
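    // cl_image reads fetch 4 elements per pixel, so the innermost dimension must be
    // a multiple of 4; the export path also requires the NHWC data layout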
    if(tensor->tensor_shape()[0] % 4 || (data_layout != DataLayout::NHWC))
    {
        return false;
    }

    // If not floating point
    if(!is_data_type_float(tensor->data_type()))
    {
        return false;
    }

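    // cl_image is not used on Midgard architectures or on G71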
    if(gpu_target == GPUTarget::G71 || get_arch_from_target(gpu_target) == GPUTarget::MIDGARD)
    {
        return false;
    }

    // Check if the cl_khr_image2d_from_buffer extension is supported on the target platform
    if(!image2d_from_buffer_supported(device))
    {
        return false;
    }

    // Check cl image pitch alignment
    if(get_cl_image_pitch_alignment(device) == 0)
    {
        return false;
    }

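    // The image view is tensor_shape[0] / 4 pixels wide (4 elements per pixel), with the
    // remaining dimensions flattened into its height; both must fit the device's image limits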
    const size_t image_w     = tensor->tensor_shape()[0] / 4;
    const size_t image_h     = tensor->tensor_shape()[1] * tensor->tensor_shape()[2] * tensor->tensor_shape()[3];
    const size_t max_image_w = device.getInfo<CL_DEVICE_IMAGE2D_MAX_WIDTH>();
    const size_t max_image_h = device.getInfo<CL_DEVICE_IMAGE2D_MAX_HEIGHT>();

    if(image_w > max_image_w || image_h > max_image_h)
    {
        return false;
    }

    return true;
}

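// Conv2d is registered as a Complex operator: still fusable, but subject to stricter
// placement rules in the operator group than Simple (e.g. elementwise) operators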
constexpr GpuOperatorType operator_type = GpuOperatorType::Complex;
} // namespace

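// A minimal usage sketch of this operator (illustrative only: the shapes, the attribute
// values and the `context` GpuWorkloadContext are assumptions, not part of this file):
//
//     GpuWorkloadSketch sketch{ &context };
//     TensorInfo src_info = sketch.create_tensor_info(TensorShape(32U, 17U, 17U, 1U), 1, DataType::F32);
//     TensorInfo wei_info = sketch.create_tensor_info(TensorShape(32U, 3U, 3U, 64U), 1, DataType::F32);
//     TensorInfo dst_info = sketch.create_tensor_info(); // left empty; auto-initialized below
//     src_info.set_data_layout(DataLayout::NHWC);
//     wei_info.set_data_layout(DataLayout::NHWC);
//     Conv2dAttributes attributes{}; // default stride/pad/dilation
//     ARM_COMPUTE_ERROR_THROW_ON(GpuConv2d::validate_op(sketch, &src_info, &wei_info, nullptr, &dst_info, attributes));
//     GpuConv2d::create_op(sketch, &src_info, &wei_info, nullptr, &dst_info, attributes);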
Status GpuConv2d::validate_op(const GpuWorkloadSketch &sketch,
                              const ITensorInfo       *src,
                              const ITensorInfo       *wei,
                              const ITensorInfo       *bia,
                              const ITensorInfo       *dst,
                              const Conv2dAttributes  &attributes)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, wei, dst);
    ARM_COMPUTE_RETURN_ERROR_ON(
        !src->has_valid_id() || !wei->has_valid_id() || !dst->has_valid_id());
    if(bia != nullptr)
    {
        ARM_COMPUTE_RETURN_ERROR_ON(!bia->has_valid_id());
    }
    // Auto initialize dst tensor info
    TensorInfo dst_info_to_validate = *dst;
    const auto data_layout          = src->data_layout();

    {
        auto shape = misc::shape_calculator::compute_deep_convolution_shape(src->tensor_shape(), data_layout, wei->tensor_shape(),
                                                                            PadStrideInfo(attributes.stride().x(), attributes.stride().y(), attributes.pad().left,
                                                                                          attributes.pad().right,
                                                                                          attributes.pad().top, attributes.pad().bottom, DimensionRoundingType::FLOOR)); // use the default DimensionRoundingType

        auto_init_if_empty(dst_info_to_validate, src->clone()->set_tensor_shape(shape));
    }

    // Perform fusion test
    // Pack tensor infos
    ArgumentPack<ITensorInfo> tensors;
    tensors.add_const_tensor(ACL_SRC_0, src);
    tensors.add_const_tensor(ACL_SRC_1, wei);
    tensors.add_const_tensor(ACL_SRC_2, bia);
    tensors.add_const_tensor(ACL_DST_0, &dst_info_to_validate);
    const auto op = sketch.implementation().operator_group().new_operator(operator_type, tensors);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(!sketch.implementation().operator_group().try_add_operator(op),
                                    "Operator fusion test failed. This operator cannot be fused into the workload");

    // Check support level
    // Data type
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::F16, DataType::F32);
    // Data layout
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_LAYOUT_NOT_IN(src, DataLayout::NHWC);

    const auto sketch_ctx = sketch.implementation().context();

    const auto gpu_target = sketch_ctx->gpu_target();

    if(sketch_ctx->gpu_language() == GpuLanguage::OpenCL)
    {
        const auto cl_compile_ctx = sketch_ctx->cl_compile_context();
        ARM_COMPUTE_RETURN_ERROR_ON(cl_compile_ctx == nullptr);
        // Validate Direct Conv2d Component
        {
            const auto properties = IGpuKernelComponent::Properties().stage(UnitWorkloadStage{ UnitWorkloadStage::Stage::Run });
            auto       settings   = ClComponentDirectConv2d::Settings();

            settings.export_to_cl_image(
                export_to_cl_image_support(src, gpu_target, cl_compile_ctx->get_device(), data_layout));

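            // Fast relaxed math is only enabled on Bifrost targets other than G71,
            // and only for floating point outputs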
            settings.fast_relaxed_math(
                (gpu_target != GPUTarget::G71 && (gpu_target & GPUTarget::GPU_ARCH_MASK) == GPUTarget::BIFROST)
                && (dst_info_to_validate.data_type() == DataType::F32 || dst_info_to_validate.data_type() == DataType::F16));

            ArgumentPack<ITensorInfo> arguments;
            arguments.add_const_tensor(ACL_SRC_0, src);
            arguments.add_const_tensor(ACL_SRC_1, wei);
            arguments.add_const_tensor(ACL_SRC_2, bia);
            arguments.add_const_tensor(ACL_DST_0, &dst_info_to_validate);
            ARM_COMPUTE_RETURN_ON_ERROR(ClComponentDirectConv2d::validate(properties, arguments, attributes, settings));
        }
    }
    else
    {
        ARM_COMPUTE_RETURN_ERROR_MSG("Unimplemented Gpu language");
    }
    return Status{};
}

void GpuConv2d::create_op(GpuWorkloadSketch &sketch,
                          ITensorInfo       *src,
                          ITensorInfo       *wei,
                          ITensorInfo       *bia,
                          ITensorInfo       *dst,
                          const Conv2dAttributes &attributes)
{
    ARM_COMPUTE_LOG_PARAMS(src, wei, bia, dst, attributes);
    // Assert validation
    ARM_COMPUTE_ERROR_THROW_ON(GpuConv2d::validate_op(sketch, src, wei, bia, dst, attributes));
    ARM_COMPUTE_ERROR_ON_NULLPTR(src, wei, dst);
    const auto data_layout = src->data_layout();

    // Auto initialize dst tensor
    {
        auto shape = misc::shape_calculator::compute_deep_convolution_shape(src->tensor_shape(), data_layout, wei->tensor_shape(),
                                                                            PadStrideInfo(attributes.stride().x(), attributes.stride().y(), attributes.pad().left,
                                                                                          attributes.pad().right,
                                                                                          attributes.pad().top, attributes.pad().bottom, DimensionRoundingType::FLOOR)); // use the default DimensionRoundingType

        auto_init_if_empty(*dst, src->clone()->set_tensor_shape(shape));
    }

    // Translate into components and add to component graph
    auto &comp_graph = sketch.implementation().component_graph();

    const auto sketch_ctx = sketch.implementation().context();

    const auto gpu_target = sketch_ctx->gpu_target();

    if(sketch_ctx->gpu_language() == GpuLanguage::OpenCL)
    {
        const auto cl_compile_ctx = sketch_ctx->cl_compile_context();
        ARM_COMPUTE_ERROR_ON(cl_compile_ctx == nullptr);

        // Add Direct Conv2d Component
        {
            auto properties = IGpuKernelComponent::Properties();
            properties.stage(UnitWorkloadStage{ UnitWorkloadStage::Stage::Run });

            auto settings = ClComponentDirectConv2d::Settings();

            settings.export_to_cl_image(
                export_to_cl_image_support(src, gpu_target, cl_compile_ctx->get_device(), data_layout));

            settings.fast_relaxed_math(
                (gpu_target != GPUTarget::G71 && (gpu_target & GPUTarget::GPU_ARCH_MASK) == GPUTarget::BIFROST)
                && (dst->data_type() == DataType::F32 || dst->data_type() == DataType::F16));

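            // When exporting the weights to a cl_image, their padding must first be
            // updated to satisfy the device's image pitch alignment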
            if(settings.export_to_cl_image())
            {
                arm_compute::opencl::kernels::gemm::update_padding_for_cl_image(wei);
            }

            ArgumentPack<ITensorInfo> arguments;
            arguments.add_const_tensor(ACL_SRC_0, src);
            arguments.add_const_tensor(ACL_SRC_1, wei);
            arguments.add_const_tensor(ACL_SRC_2, bia);
            arguments.add_const_tensor(ACL_DST_0, dst);
            comp_graph.add_new_component<ClComponentDirectConv2d>(properties, arguments, attributes, settings);
        }
    }
    else
    {
        ARM_COMPUTE_ERROR("Unimplemented Gpu language");
    }

    // Set up fusion test by adding to the Operator Group
    // Note this has to be performed after all the components have been successfully added to the component graph

    // Pack tensor infos
    ArgumentPack<ITensorInfo> tensors;
    tensors.add_const_tensor(ACL_SRC_0, src);
    tensors.add_tensor(ACL_SRC_1, wei);
    tensors.add_const_tensor(ACL_SRC_2, bia);
    tensors.add_tensor(ACL_DST_0, dst);

    const auto op = sketch.implementation().operator_group().new_operator(operator_type, tensors);
    sketch.implementation().operator_group().add_operator(op);
}

} // namespace dynamic_fusion
} // namespace experimental
} // namespace arm_compute