/*
 * Copyright (c) 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h"

#include "arm_compute/core/AccessWindowStatic.h"
#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/CL/OpenCL.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"

#include <set>
#include <string>

using namespace arm_compute;
namespace
{
/** Calculates softmax parameters from the quantized input scale and scaling factor for the exponent and places them as build options.
 *
 * Prepares these build options:
 * -INPUT_BETA_MULTIPLIER, INPUT_BETA_LEFT_SHIFT - quantized representation of beta multiplier.
 * -DIFF_MIN - threshold difference between maximum value of input data and current processed value,
 *             it defines whether the value will be taken into account or not.
 *
 * @param[in] input_scale Input scaling factor
 * @param[in] beta        Exponent scaling factor beta
 *
 * @return Build options with the softmax parameters set
 */
CLBuildOptions prepare_quantized_softmax_build_options(float input_scale, float beta)
{
    // Number of integer bits in temporary fixed-point representation of current-to-max difference
    static const int scaled_diff_int_bits = 5;
    // Number of integer bits used in temporary fixed-point representation of exponent accumulator
    static const int exp_accumulation_in_bits = 12;

    const double beta_multiplier = std::min(
        1.0 * beta * input_scale * (1 << (31 - scaled_diff_int_bits)),
        (1ll << 31) - 1.0);
    int input_beta_multiplier, input_beta_left_shift;
    quantization::calculate_quantized_multiplier_greater_than_one(beta_multiplier, &input_beta_multiplier, &input_beta_left_shift);

    const double max_input_rescaled = 1.0 * ((1 << scaled_diff_int_bits) - 1) * (1ll << (31 - scaled_diff_int_bits)) / (1ll << input_beta_left_shift);
    const int    diff_min           = -1.f * std::floor(max_input_rescaled);
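    // Worked example (illustrative values, not taken from the library): for input_scale = 1.f / 256
    // and beta = 1.f, beta_multiplier = (1 / 256) * 2^26 = 2^18, which
    // calculate_quantized_multiplier_greater_than_one() splits into input_beta_multiplier = 2^30
    // (a Q0.31 mantissa of 0.5) and input_beta_left_shift = 19. Then
    // max_input_rescaled = 31 * 2^26 / 2^19 = 3968, so diff_min = -3968: any (value - max)
    // difference that rescales below this threshold contributes a negligible exp() term and is
    // skipped by the quantized kernels.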

    CLBuildOptions build_opts;
    build_opts.add_option("-DSCALED_DIFF_INT_BITS=" + support::cpp11::to_string(scaled_diff_int_bits));
    build_opts.add_option("-DEXP_ACCUMULATION_INT_BITS=" + support::cpp11::to_string(exp_accumulation_in_bits));
    build_opts.add_option("-DINPUT_BETA_MULTIPLIER=" + support::cpp11::to_string(input_beta_multiplier));
    build_opts.add_option("-DINPUT_BETA_LEFT_SHIFT=" + support::cpp11::to_string(input_beta_left_shift));
    build_opts.add_option("-DDIFF_MIN=" + support::cpp11::to_string(diff_min));

    return build_opts;
}
} // namespace

void CLLogits1DMaxKernel::configure(const ICLTensor *input, ICLTensor *output)
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QASYMM8, DataType::QS16, DataType::F16, DataType::F32);
    ARM_COMPUTE_ERROR_ON_NULLPTR(output);

    // Softmax across the x dimension
    TensorShape output_shape{ input->info()->tensor_shape() };
    output_shape.set(0, 1);

    // Output auto initialization if not yet initialized
    auto_init_if_empty(*output->info(),
                       output_shape,
                       1,
                       input->info()->data_type(),
                       input->info()->fixed_point_position(),
                       input->info()->quantization_info());

    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
    ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT_POSITION(input, output);
    ARM_COMPUTE_ERROR_ON_MISMATCHING_DIMENSIONS(output->info()->tensor_shape(), output_shape);

    _input  = input;
    _output = output;

    const DataType data_type = input->info()->data_type();
    // The kernel loops over all elements in steps of 16
    const unsigned int num_elems_processed_per_iteration = ceil_to_multiple(input->info()->dimension(0), 16);
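    // E.g. (illustrative): a row width of 20 gives ceil_to_multiple(20, 16) = 32, so each row is
    // processed as two 16-element steps and the access window below requests padding accordingly.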

    // Set build options
    CLBuildOptions build_opts;
    build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
    build_opts.add_option_if(is_data_type_fixed_point(data_type),
                             "-DFIXED_POINT_POSITION=" + support::cpp11::to_string(input->info()->fixed_point_position()));
    build_opts.add_option_if(data_type == DataType::F16, "-DUSE_F16");
    // Tell the kernel that the width is not a multiple of 16
    build_opts.add_option_if((input->info()->dimension(0) % max_cl_vector_width) != 0, "-DNON_MULTIPLE_OF_16");

    // Create kernel
    std::string kernel_name = is_data_type_quantized_asymmetric(data_type) ? "softmax_layer_max_quantized" : "softmax_layer_max";
    _kernel                 = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));

    // Set fixed arguments
    unsigned int idx = 2 * num_arguments_per_3D_tensor(); // Skip the input and output parameters
    _kernel.setArg<cl_uint>(idx++, input->info()->dimension(0));

    // Configure kernel window
    constexpr unsigned int num_elems_written_per_iteration = 1;

    Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));
    AccessWindowHorizontal input_access(input->info(), 0, num_elems_processed_per_iteration);
    AccessWindowHorizontal output_access(output->info(), 0, num_elems_written_per_iteration);

    update_window_and_padding(win, input_access, output_access);

    output_access.set_valid_region(win, ValidRegion(Coordinates(), output->info()->tensor_shape()));

    ICLKernel::configure(win);
}
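
// Usage sketch (illustrative and simplified, not the library's exact wiring): the 1D softmax
// kernels in this file are chained per row, roughly max -> shift_exp_sum -> norm. Tensor
// allocation and error handling are omitted; the tensor names are placeholders.
//
//   CLLogits1DMaxKernel         max_kernel;
//   CLLogits1DShiftExpSumKernel shift_exp_sum_kernel;
//   CLLogits1DNormKernel        norm_kernel;
//   max_kernel.configure(&input, &max);                            // per-row maximum
//   shift_exp_sum_kernel.configure(&input, &max, &tmp, &sum, 1.f); // tmp = exp(input - max), sum = row sum
//   norm_kernel.configure(&tmp, &sum, &output, 1.f);               // output = tmp / sum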

CLLogits1DShiftExpSumKernel::CLLogits1DShiftExpSumKernel()
    : _input(nullptr), _max(nullptr), _output(nullptr), _sum(nullptr)
{
}

void CLLogits1DShiftExpSumKernel::configure(const ICLTensor *input, const ICLTensor *max, ICLTensor *output, ICLTensor *sum, float beta)
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QASYMM8, DataType::QS16, DataType::F16, DataType::F32);
    ARM_COMPUTE_ERROR_ON_NULLPTR(max, sum, output);

    const bool     is_quantized_asymmetric = is_data_type_quantized_asymmetric(input->info()->data_type());
    const DataType tmp_data_type           = is_quantized_asymmetric ? DataType::S32 : input->info()->data_type();

    // Output auto initialization if not yet initialized
    auto_init_if_empty(*sum->info(), max->info()->tensor_shape(), 1, tmp_data_type, input->info()->fixed_point_position());
    auto_init_if_empty(*output->info(), input->info()->tensor_shape(), 1, tmp_data_type, input->info()->fixed_point_position());

    ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input, output);
    ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(max, sum);
    if(is_quantized_asymmetric)
    {
        ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, max);
        ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(output, sum);
    }
    else
    {
        ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output, max, sum);
        ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT_POSITION(input, output, max, sum);
    }

    _input  = input;
    _max    = max;
    _output = output;
    _sum    = sum;

    const DataType dt       = input->info()->data_type();
    auto           beta_int = static_cast<int>(lround(beta * (1 << input->info()->fixed_point_position())));
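    // E.g. (illustrative): for fixed-point data with fixed_point_position = 3, beta = 2.0f is
    // quantized as beta_int = lround(2.0 * 2^3) = 16 before being passed to the kernel as -DBETA.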

    // The kernel loops over all elements in steps of 16
    const unsigned int num_elems_processed_per_iteration = ceil_to_multiple(input->info()->dimension(0), 16);

    // Set build options
    CLBuildOptions build_opts;
    build_opts.add_option(std::string("-DDATA_TYPE=" + get_cl_type_from_data_type(dt)));
    build_opts.add_option_if(is_data_type_fixed_point(dt),
                             std::string("-DFIXED_POINT_POSITION=" + support::cpp11::to_string(input->info()->fixed_point_position())));
    build_opts.add_option_if(dt == DataType::F16, std::string("-DUSE_F16"));
    // Tell the kernel that the width is not a multiple of 16
    build_opts.add_option_if((input->info()->dimension(0) % max_cl_vector_width) != 0, std::string("-DNON_MULTIPLE_OF_16"));
    build_opts.add_option_if(is_data_type_fixed_point(dt) && (beta != 1.0f), std::string("-DBETA=" + support::cpp11::to_string(beta_int)));
    build_opts.add_option_if(is_data_type_float(dt) && (beta != 1.0f), std::string("-DBETA=" + float_to_string_with_full_precision(beta)));
    build_opts.add_options_if(is_quantized_asymmetric,
                              prepare_quantized_softmax_build_options(input->info()->quantization_info().scale, beta).options());

    // Create kernel
    std::string kernel_name = is_quantized_asymmetric ? "softmax_layer_shift_exp_sum_quantized" : "softmax_layer_shift_exp_sum";
    _kernel                 = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));

    // Set fixed arguments
    unsigned int idx = 4 * num_arguments_per_3D_tensor(); // Skip the input and output parameters
    _kernel.setArg<cl_uint>(idx++, input->info()->dimension(0));

    // Configure window
    Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));

    AccessWindowHorizontal input_access(input->info(), 0, num_elems_processed_per_iteration);
    AccessWindowHorizontal max_access(max->info(), 0, 1);
    AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration);
    AccessWindowHorizontal sum_access(sum->info(), 0, 1);

    update_window_and_padding(win, input_access, max_access, output_access, sum_access);

    output_access.set_valid_region(win, input->info()->valid_region());
    sum_access.set_valid_region(win, ValidRegion(Coordinates(), sum->info()->tensor_shape()));

    ICLKernel::configure(win);
}

void CLLogits1DShiftExpSumKernel::run(const Window &window, cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);

    Window window_collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
    Window slice            = window_collapsed.first_slice_window_3D();

    do
    {
        unsigned int idx = 0;
        // Set inputs
        add_3D_tensor_argument(idx, _input, slice);
        add_3D_tensor_argument(idx, _max, slice);
        add_3D_tensor_argument(idx, _output, slice);
        add_3D_tensor_argument(idx, _sum, slice);
        enqueue(queue, *this, slice);
    }
    while(window_collapsed.slide_window_slice_3D(slice));
}

/**< Grid size (obtained through auto-tuning) */
const unsigned int CLLogits1DMaxShiftExpSumKernel::_grid_size = 64;
/**< Vector size in the serial case (obtained through auto-tuning) */
const unsigned int CLLogits1DMaxShiftExpSumKernel::_serial_vector_size = 8;
/**< Vector size in the parallel case (obtained through auto-tuning, enables the best memory access pattern for Bifrost). */
const unsigned int CLLogits1DMaxShiftExpSumKernel::_parallel_vector_size = 4;

CLLogits1DMaxShiftExpSumKernel::CLLogits1DMaxShiftExpSumKernel()
    : _input(nullptr), _max(nullptr), _output(nullptr), _sum(nullptr)
{
}

void CLLogits1DMaxShiftExpSumKernel::configure(const ICLTensor *input, ICLTensor *max, ICLTensor *output, ICLTensor *sum, float beta)
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
    ARM_COMPUTE_ERROR_ON_NULLPTR(max, sum, output);

    // Output auto initialization if not yet initialized
    auto_init_if_empty(*sum->info(), max->info()->tensor_shape(), 1, input->info()->data_type(), input->info()->fixed_point_position());
    auto_init_if_empty(*output->info(), input->info()->tensor_shape(), 1, input->info()->data_type(), input->info()->fixed_point_position());

    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output, max, sum);
    ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT_POSITION(input, output, max, sum);
    ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input, output);
    ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(max, sum);

    _input  = input;
    _max    = max;
    _output = output;
    _sum    = sum;

    const DataType dt                 = input->info()->data_type();
    const size_t   reduction_dim_size = input->info()->dimension(0);
    auto           beta_int           = static_cast<int>(lround(beta * (1 << input->info()->fixed_point_position())));

    // Set build options
    CLBuildOptions build_opts;
    build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(dt));
    build_opts.add_option_if(is_data_type_fixed_point(dt),
                             "-DFIXED_POINT_POSITION=" + support::cpp11::to_string(input->info()->fixed_point_position()));
    build_opts.add_option_if(dt == DataType::F16, "-DUSE_F16");
    build_opts.add_option_if(is_data_type_fixed_point(dt) && (beta != 1.0f), "-DBETA=" + support::cpp11::to_string(beta_int));
    build_opts.add_option_if(is_data_type_float(dt) && (beta != 1.0f), "-DBETA=" + float_to_string_with_full_precision(beta));

    // Setting _lws_hint in this way can also communicate grid_size to CLLogits1DMaxShiftExpSumKernel::run().
    // A single workgroup performs reduction in dimension 0 in the parallel case, hence lws[0]==gws[0].
    _lws_hint = cl::NullRange;
    std::string           kernel_name             = std::string("softmax_layer_max_shift_exp_sum_serial");
    ParallelReductionInfo parallel_reduction_info = is_parallel_reduction(reduction_dim_size);
    unsigned int          vector_size             = std::get<1>(parallel_reduction_info);

    build_opts.add_option("-DVECTOR_SIZE=" + support::cpp11::to_string(vector_size));
    build_opts.add_option("-DLOG_VECTOR_SIZE=" + support::cpp11::to_string(lround(log2(vector_size))));
    build_opts.add_option_if((reduction_dim_size % vector_size) != 0, "-DNON_MULTIPLE_OF_VECTOR_SIZE");

    // Configure parallel kernel if needed
    if(std::get<0>(parallel_reduction_info))
    {
        kernel_name            = std::string("softmax_layer_max_shift_exp_sum_parallel");
        bool is_grid_size_pow2 = (_grid_size != 0) && ((_grid_size & (_grid_size - 1)) == 0);
        build_opts.add_option_if(is_grid_size_pow2 && _grid_size <= 256, "-DGRID_SIZE=" + support::cpp11::to_string(_grid_size));

        // Handle boundary conditions.
        const unsigned int multiple_grid_size = (reduction_dim_size / vector_size) % _grid_size;
        build_opts.add_option_if((multiple_grid_size != 0) || ((reduction_dim_size % vector_size) != 0), "-DNON_MULTIPLE_OF_GRID_SIZE");
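        // E.g. (illustrative): reduction_dim_size = 1000 with vector_size = 4 gives
        // (1000 / 4) % 64 = 58, so -DNON_MULTIPLE_OF_GRID_SIZE is defined and the kernel
        // handles the leftover elements that do not fill a whole grid pass.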
    }

    // Create kernel.
    _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));

    // Set static arguments. Both the kernels use the same arguments
    unsigned int idx = 4 * num_arguments_per_3D_tensor(); // Skip the input and output parameters
    _kernel.setArg<cl_uint>(idx++, reduction_dim_size);

    // Configure window
    const unsigned int num_elems_x = ceil_to_multiple(input->info()->tensor_shape().x(), vector_size);
    Window             win         = calculate_max_window(*input->info(), Steps(num_elems_x));

    AccessWindowHorizontal input_access(input->info(), 0, num_elems_x);
    AccessWindowHorizontal max_access(max->info(), 0, 1);
    AccessWindowHorizontal output_access(output->info(), 0, num_elems_x);
    AccessWindowHorizontal sum_access(sum->info(), 0, 1);

    update_window_and_padding(win, input_access, max_access, output_access, sum_access);

    output_access.set_valid_region(win, input->info()->valid_region());
    sum_access.set_valid_region(win, ValidRegion(Coordinates(), sum->info()->tensor_shape()));

    ICLKernel::configure(win);
}

CLLogits1DMaxShiftExpSumKernel::ParallelReductionInfo CLLogits1DMaxShiftExpSumKernel::is_parallel_reduction(size_t size)
{
    bool         is_parallel_reduction = (size >= (_grid_size * _serial_vector_size)) && (_grid_size > 1);
    unsigned int vector_size           = is_parallel_reduction ? _parallel_vector_size : _serial_vector_size;
    return std::make_tuple(is_parallel_reduction, vector_size);
}
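// With the tuned constants above, the parallel path is taken for rows of at least
// _grid_size * _serial_vector_size = 64 * 8 = 512 elements. E.g. (illustrative):
// is_parallel_reduction(1024) returns (true, 4), while is_parallel_reduction(256)
// returns (false, 8).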

void CLLogits1DMaxShiftExpSumKernel::run(const Window &window, cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);

    // Collapse window in Z dimension
    Window window_collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);

    // Reconfigure window in case of parallel reduction
    ParallelReductionInfo parallel_reduction_info = is_parallel_reduction(_input->info()->dimension(0));
    if(std::get<0>(parallel_reduction_info))
    {
        // To launch grid_size parallel workitems, steps.x should be modified as follows.
        const unsigned int step = std::get<1>(parallel_reduction_info);
        window_collapsed.set(Window::DimX, Window::Dimension(0, _grid_size * step, step));
    }
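    // E.g. (illustrative): with _grid_size = 64 and step = 4 the X dimension becomes
    // Dimension(0, 256, 4), i.e. 64 work-items per row, each reducing its own strided chunk.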

    // Get slices
    Window slice = window_collapsed.first_slice_window_3D();
    do
    {
        unsigned int idx = 0;
        // Set inputs
        add_3D_tensor_argument(idx, _input, slice);
        add_3D_tensor_argument(idx, _max, slice);
        add_3D_tensor_argument(idx, _output, slice);
        add_3D_tensor_argument(idx, _sum, slice);
        enqueue(queue, *this, slice, _lws_hint);
    }
    while(window_collapsed.slide_window_slice_3D(slice));
}

CLLogits1DNormKernel::CLLogits1DNormKernel()
    : _input(nullptr), _sum(nullptr), _output(nullptr)
{
}

void CLLogits1DNormKernel::configure(const ICLTensor *input, const ICLTensor *sum, ICLTensor *output, float beta)
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::S32, DataType::F16, DataType::F32);
    ARM_COMPUTE_ERROR_ON_NULLPTR(sum, output);
    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, sum);
    ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT_POSITION(input, sum);

    // Note: output should always have a scale of 1/256 and offset 0
    const QuantizationInfo allowed_quantization_info = QuantizationInfo(1.f / 256, 0);
    const bool             is_quantized_asymmetric   = (input->info()->data_type() == DataType::S32);
    const DataType         output_data_type          = is_quantized_asymmetric ? DataType::QASYMM8 : input->info()->data_type();
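    // I.e. (illustrative): a quantized output value q represents the real value q * (1 / 256),
    // so the softmax output range [0, 1) maps onto the full 8-bit QASYMM8 range.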

    // Output auto initialization if not yet initialized
    auto_init_if_empty(*output->info(),
                       input->info()->tensor_shape(),
                       1,
                       output_data_type,
                       input->info()->fixed_point_position(),
                       allowed_quantization_info);

    ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input, output);
    if(!is_quantized_asymmetric)
    {
        ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
        ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT_POSITION(input, output);
    }
    else
    {
        ARM_COMPUTE_ERROR_ON(output->info()->quantization_info() != allowed_quantization_info);
    }

    _input  = input;
    _sum    = sum;
    _output = output;

    // Set build options
    CLBuildOptions build_opts;
    build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
    build_opts.add_option_if(is_data_type_fixed_point(input->info()->data_type()),
                             "-DFIXED_POINT_POSITION=" + support::cpp11::to_string(input->info()->fixed_point_position()));
    build_opts.add_options_if(is_quantized_asymmetric,
                              prepare_quantized_softmax_build_options(input->info()->quantization_info().scale, beta).options());

    // Create kernel
    std::string kernel_name = is_quantized_asymmetric ? "softmax_layer_norm_quantized" : "softmax_layer_norm";
    _kernel                 = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));

    // Configure window
    constexpr unsigned int num_elems_processed_per_iteration = 16;

    Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));

    AccessWindowHorizontal input_access(input->info(), 0, num_elems_processed_per_iteration);
    AccessWindowStatic     sum_access(sum->info(), 0, 0, 1, sum->info()->dimension(1));
    AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration);

    update_window_and_padding(win, input_access, sum_access, output_access);

    output_access.set_valid_region(win, input->info()->valid_region());

    ICLKernel::configure(win);
}

void CLLogits1DNormKernel::run(const Window &window, cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);

    Window window_collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
    Window slice            = window_collapsed.first_slice_window_3D();

    do
    {
        Window sum_slice = slice;
        sum_slice.set(Window::DimX, Window::Dimension(0, 1, 1));

        unsigned int idx = 0;
        // Set inputs
        add_3D_tensor_argument(idx, _input, slice);
        add_3D_tensor_argument(idx, _sum, sum_slice);
        add_3D_tensor_argument(idx, _output, slice);
        enqueue(queue, *this, slice);
    }
    while(window_collapsed.slide_window_slice_3D(slice));
}