/*
 * Copyright (c) 2020-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/CL/CLCompileContext.h"
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Validate.h"
#include "support/StringSupport.h"

#include "src/core/CL/CLUtils.h"
#include "src/core/experimental/PostOpUtils.h"

namespace arm_compute
{
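// Create a 2D OpenCL image that aliases the OpenCL buffer backing the given tensor, so that kernels can
// access the data through the image/texture path. Illustrative usage sketch, assuming 'tensor' is an
// allocated CL tensor whose data type and padding satisfy the checks below (the variable name is hypothetical):
//
//   const cl::Image2D image = create_image2d_from_tensor(tensor, CLImage2DType::ReadOnly);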
cl::Image2D create_image2d_from_tensor(const ICLTensor *tensor, CLImage2DType image_type)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(tensor);

    const cl::Context &ctx    = CLKernelLibrary::get().context();
    const cl::Buffer  &buffer = tensor->cl_buffer();
    const ITensorInfo *info   = tensor->info();
    ARM_COMPUTE_ERROR_ON_MSG(info->lock_paddings(),
                             "Tensor paddings must not be locked to allow extending paddings to satisfy cl_image pitch alignment requirement");

    // The image is exported with a 4-channel (RGBA) pixel format, so each texel packs 4 elements of dimension 0
    const size_t image_w{ info->dimension(0) / 4 };
    const size_t image_h{ info->tensor_shape().total_size() / info->dimension(0) };
    const size_t max_image_w{ CLKernelLibrary::get().get_device().getInfo<CL_DEVICE_IMAGE2D_MAX_WIDTH>() };
    const size_t max_image_h{ CLKernelLibrary::get().get_device().getInfo<CL_DEVICE_IMAGE2D_MAX_HEIGHT>() };

    ARM_COMPUTE_UNUSED(max_image_w, max_image_h);
    ARM_COMPUTE_ERROR_ON_MSG(image_w > max_image_w, "Image width exceeds maximum width for exporting to cl_image");
    ARM_COMPUTE_ERROR_ON_MSG(image_h > max_image_h, "Image height exceeds maximum height for exporting to cl_image");

    const TensorShape shape2d(image_w, image_h);
    // The image row pitch is the byte stride between consecutive rows of the tensor
    const size_t image_row_pitch = info->strides_in_bytes()[1];

    return create_image2d_from_buffer(ctx, buffer, shape2d, info->data_type(), image_row_pitch, image_type);
}

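// Illustrative example of the packing assumed above (hypothetical sizes): an F32 tensor with
// dimension(0) = 16 and 8 rows yields a shape2d of (4, 8), since each CL_RGBA/CL_FLOAT texel packs
// 4 consecutive F32 elements, while image_row_pitch is taken from the tensor's row stride in bytes.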
cl::Image2D create_image2d_from_buffer(const cl::Context &ctx, const cl::Buffer &buffer, const TensorShape &shape2d, DataType data_type, size_t image_row_pitch, CLImage2DType image_type)
{
    ARM_COMPUTE_ERROR_ON_MSG(!image2d_from_buffer_supported(CLKernelLibrary::get().get_device()),
                             "The extension cl_khr_image2d_from_buffer is not supported on the target platform");
    ARM_COMPUTE_ERROR_ON_MSG(get_cl_image_pitch_alignment(CLKernelLibrary::get().get_device()) == 0,
                             "Impossible to retrieve the cl_image pitch alignment");
    ARM_COMPUTE_ERROR_ON_MSG(buffer.get() == nullptr,
                             "Cannot create cl_image from empty cl_buffer");

    cl_channel_type cl_data_type;

    switch(data_type)
    {
        case DataType::F32:
            cl_data_type = CL_FLOAT;
            break;
        case DataType::F16:
            cl_data_type = CL_HALF_FLOAT;
            break;
        default:
            ARM_COMPUTE_ERROR("Data type not supported with OpenCL image2d");
    }

    cl_mem cl_image;
    cl_int err = CL_SUCCESS;

    const cl_image_format format = { CL_RGBA, cl_data_type };

    // Describe a 2D image that aliases the existing buffer instead of allocating new storage
    cl_image_desc desc;
    memset(&desc, 0, sizeof(desc));
    desc.image_type      = CL_MEM_OBJECT_IMAGE2D;
    desc.mem_object      = buffer();
    desc.image_row_pitch = image_row_pitch;
    desc.image_width     = shape2d[0];
    desc.image_height    = shape2d[1];

    switch(image_type)
    {
        case CLImage2DType::ReadOnly:
            cl_image = clCreateImage(ctx(), CL_MEM_READ_ONLY, &format, &desc, nullptr, &err);
            break;
        case CLImage2DType::WriteOnly:
            cl_image = clCreateImage(ctx(), CL_MEM_WRITE_ONLY, &format, &desc, nullptr, &err);
            break;
        default:
            ARM_COMPUTE_ERROR("Unsupported CLImage2DType");
    }

    ARM_COMPUTE_UNUSED(err);
    ARM_COMPUTE_ERROR_ON_MSG(err != CL_SUCCESS, "Error during the creation of CL image from buffer");

    return cl::Image2D(cl_image);
}

namespace experimental
{
PostOpCLKernelUtils::PostOpCLKernelUtils(const Config &supported_config)
    : _supported_config(supported_config)
{
    ARM_COMPUTE_ERROR_ON_MSG(supported_config.empty(), "Empty PostOp CL kernel support configuration is not allowed");
    for(auto it = _supported_config.begin(); it != _supported_config.end(); ++it)
    {
        auto post_op_sequence = it->first;
        auto post_op_slots    = std::get<1>(it->second);
        ARM_COMPUTE_ERROR_ON_MSG(post_op_sequence.size() != post_op_slots.size(), "The number of PostOps must be the same as that of the assigned slots");
    }
}
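// Illustrative sketch (hypothetical values) of what a supported configuration entry encodes: a post op
// sequence such as { Activation, Eltwise_Add, Activation } mapped to a kernel-name postfix (for instance
// "_post_act_eltwise_op_act") and to the slot numbers that become the "-DP<slot>_..." prefixes of the
// build options generated by set_post_ops_cl_build_options() below.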

bool PostOpCLKernelUtils::are_post_op_shapes_compliant(const ITensorInfo *dst, const experimental::PostOpList<ITensorInfo *> &post_ops)
{
    for(const auto &op : post_ops.get_list())
    {
        for(const auto &tensor : op->arguments())
        {
            const TensorShape &out_shape = TensorShape::broadcast_shape(dst->tensor_shape(), (*tensor)->tensor_shape());
            // All post ops must be elementwise and must not alter the shape of the original dst tensor after broadcasting
            if(detail::have_different_dimensions(out_shape, dst->tensor_shape(), 0))
            {
                return false;
            }
            // NOTE: Kernel limitation: currently only the following broadcasting types are supported:
            // 1. The post op argument is a scalar, broadcast in both the first and second dims
            // 2. The post op argument has second dim = 1 and first dim = N, broadcast only in the second dim
            // The remaining case (second dim = M, first dim = 1, broadcast only in the first dim) is NOT supported
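            // For example (hypothetical shapes, listed as {first dim, second dim}): with dst = {8, 4}, an
            // argument of {1, 1} (scalar) or {8, 1} is accepted, whereas {1, 4} falls into the unsupported
            // case rejected below.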
            if(dst->dimension(0) > 1 && dst->dimension(1) > 1 && (*tensor)->dimension(0) == 1 && (*tensor)->dimension(1) > 1)
            {
                return false;
            }
        }
    }
    return true;
}

bool PostOpCLKernelUtils::is_post_op_sequence_supported(const PostOpList<ITensorInfo *> &post_ops) const
{
    if(post_ops.size() == 0)
    {
        return true; // Always support cases where no post op is specified
    }
    const auto post_op_sequence = get_post_op_sequence(post_ops);

    return _supported_config.find(post_op_sequence) != _supported_config.end();
}

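// Illustrative sketch of the options generated below (hypothetical slot numbers and values): an Activation
// post op assigned to slot 2 would add options of the form "-DP2_ACTIVATION_TYPE=relu",
// "-DP2_ACTIVATION_A_VAL=..." and "-DP2_ACTIVATION_B_VAL=...", while an Eltwise_Add post op in slot 3 with
// one 2D argument would add "-DP3_ELTWISE_OP=ADD_X_POS_1", "-DP3_ELTWISE_ARG1_HEIGHT=<dim1>" and
// "-DP3_ELTWISE_ARG1_WIDTH=<dim0>".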
void PostOpCLKernelUtils::set_post_ops_cl_build_options(CLBuildOptions &build_opts, const PostOpList<ITensorInfo *> &post_ops) const
{
    const auto post_op_sequence = get_post_op_sequence(post_ops);
    const auto slots            = std::get<1>(_supported_config.at(post_op_sequence));
    for(size_t post_op_id = 0; post_op_id < post_ops.size(); ++post_op_id)
    {
        const auto &post_op     = post_ops.get_list().at(post_op_id);
        const auto  slot_prefix = "-DP" + support::cpp11::to_string(slots[post_op_id]);
        if(post_op->type() == experimental::PostOpType::Activation)
        {
            const auto _post_op  = utils::cast::polymorphic_downcast<const experimental::PostOpAct<ITensorInfo *> *>(post_op.get());
            const auto act_type  = slot_prefix + "_ACTIVATION_TYPE=" + lower_string(string_from_activation_func(_post_op->_act_info.activation()));
            const auto act_a_val = slot_prefix + "_ACTIVATION_A_VAL=" + float_to_string_with_full_precision(_post_op->_act_info.a());
            const auto act_b_val = slot_prefix + "_ACTIVATION_B_VAL=" + float_to_string_with_full_precision(_post_op->_act_info.b());
            build_opts.add_option(act_type);
            build_opts.add_option(act_a_val);
            build_opts.add_option(act_b_val);
        }
        else if(post_op->type() == experimental::PostOpType::Eltwise_Add)
        {
            size_t     arg_id     = 1;
            const auto eltwise_op = slot_prefix + "_ELTWISE_OP=ADD" + "_X_POS_" + support::cpp11::to_string(post_op->prev_dst_pos());
            build_opts.add_option(eltwise_op);
            for(const auto &tensor : post_op->arguments())
            {
                const auto height = slot_prefix + "_ELTWISE_ARG" + support::cpp11::to_string(arg_id) + "_HEIGHT=" + support::cpp11::to_string((*tensor)->dimension(1));
                const auto width  = slot_prefix + "_ELTWISE_ARG" + support::cpp11::to_string(arg_id) + "_WIDTH=" + support::cpp11::to_string((*tensor)->dimension(0));
                build_opts.add_option(height);
                build_opts.add_option(width);
                ++arg_id;
            }
        }
        else if(post_op->type() == experimental::PostOpType::Eltwise_PRelu)
        {
            size_t     arg_id     = 1;
            const auto eltwise_op = slot_prefix + "_ELTWISE_OP=PRELU" + "_X_POS_" + support::cpp11::to_string(post_op->prev_dst_pos());
            build_opts.add_option(eltwise_op);
            for(const auto &tensor : post_op->arguments())
            {
                const auto height = slot_prefix + "_ELTWISE_ARG" + support::cpp11::to_string(arg_id) + "_HEIGHT=" + support::cpp11::to_string((*tensor)->dimension(1));
                const auto width  = slot_prefix + "_ELTWISE_ARG" + support::cpp11::to_string(arg_id) + "_WIDTH=" + support::cpp11::to_string((*tensor)->dimension(0));
                build_opts.add_option(height);
                build_opts.add_option(width);
                ++arg_id;
            }
        }
    }
}

void PostOpCLKernelUtils::set_post_ops_cl_kernel_name(std::string &kernel_name, const PostOpList<ITensorInfo *> &post_ops) const
{
    const auto post_op_sequence = get_post_op_sequence(post_ops);
    const auto postfix          = std::get<0>(_supported_config.at(post_op_sequence));
    kernel_name += postfix;
}
} // namespace experimental

} // namespace arm_compute